Merge pull request #7401 from filecoin-project/release/v1.11.3

Release/v1.11.3

Commit: a0ddb10deb
@@ -92,6 +92,9 @@ jobs:
       - run: sudo apt-get install npm
       - run:
           command: make buildall
+      - run:
+          name: check tag and version output match
+          command: ./scripts/version-check.sh ./lotus
       - store_artifacts:
           path: lotus
       - store_artifacts:
@@ -348,7 +351,7 @@ jobs:
   build-macos:
     description: build darwin lotus binary
     macos:
-      xcode: "10.0.0"
+      xcode: "12.5.0"
     working_directory: ~/go/src/github.com/filecoin-project/lotus
     steps:
       - prepare:
@@ -367,11 +370,6 @@ jobs:
           name: Install Rust
           command: |
             curl https://sh.rustup.rs -sSf | sh -s -- -y
-      - run:
-          name: Install jq
-          command: |
-            curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
-            chmod +x /usr/local/bin/jq
       - run:
           name: Install hwloc
           command: |
@@ -388,6 +386,9 @@ jobs:
       - run:
           command: make build
           no_output_timeout: 30m
+      - run:
+          name: check tag and version output match
+          command: ./scripts/version-check.sh ./lotus
       - store_artifacts:
           path: lotus
       - store_artifacts:
@@ -810,11 +811,21 @@ workflows:
           suite: itest-deadlines
           target: "./itests/deadlines_test.go"

+      - test:
+          name: test-itest-deals_512mb
+          suite: itest-deals_512mb
+          target: "./itests/deals_512mb_test.go"
+
       - test:
           name: test-itest-deals_concurrent
           suite: itest-deals_concurrent
           target: "./itests/deals_concurrent_test.go"

+      - test:
+          name: test-itest-deals_max_staging_deals
+          suite: itest-deals_max_staging_deals
+          target: "./itests/deals_max_staging_deals_test.go"
+
       - test:
           name: test-itest-deals_offline
           suite: itest-deals_offline
@@ -978,14 +989,7 @@ workflows:
               only:
                 - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-lotus-soup
-      - build-macos:
-          filters:
-            branches:
-              ignore:
-                - /.*/
-            tags:
-              only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-macos
       - build-appimage:
           filters:
             branches:
@@ -92,6 +92,9 @@ jobs:
       - run: sudo apt-get install npm
       - run:
           command: make buildall
+      - run:
+          name: check tag and version output match
+          command: ./scripts/version-check.sh ./lotus
       - store_artifacts:
           path: lotus
       - store_artifacts:
@@ -348,7 +351,7 @@ jobs:
   build-macos:
     description: build darwin lotus binary
     macos:
-      xcode: "10.0.0"
+      xcode: "12.5.0"
     working_directory: ~/go/src/github.com/filecoin-project/lotus
     steps:
       - prepare:
@@ -367,11 +370,6 @@ jobs:
           name: Install Rust
           command: |
             curl https://sh.rustup.rs -sSf | sh -s -- -y
-      - run:
-          name: Install jq
-          command: |
-            curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
-            chmod +x /usr/local/bin/jq
       - run:
           name: Install hwloc
           command: |
@@ -388,6 +386,9 @@ jobs:
       - run:
           command: make build
           no_output_timeout: 30m
+      - run:
+          name: check tag and version output match
+          command: ./scripts/version-check.sh ./lotus
       - store_artifacts:
           path: lotus
       - store_artifacts:
@@ -843,14 +844,7 @@ workflows:
               only:
                 - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-lotus-soup
-      - build-macos:
-          filters:
-            branches:
-              ignore:
-                - /.*/
-            tags:
-              only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-macos
       - build-appimage:
           filters:
             branches:
32  .codecov.yml
@@ -1,9 +1,25 @@
+ignore:
+  # Auto generated
+  - "^.*_gen.go$"
+  - "^.*/mock_full.go$"
+  # Old actors.
+  - "^chain/actors/builtin/[^/]*/(message|state|v)[0-4]\\.go$" # We test the latest version only.
+  # Tests
+  - "api/test/**"
+  - "conformance/**"
+  # Generators
+  - "gen/**"
+  - "chain/actors/agen/**"
+  # Non-critical utilities
+  - "api/docgen/**"
+  - "api/docgen-openrpc/**"
 coverage:
   status:
+    patch: off
     project:
       tools-and-tests:
         target: auto
-        threshold: 0.5%
+        threshold: 1%
         informational: true
         paths:
           - "testplans"
@@ -17,27 +33,27 @@ coverage:
           - "build"
       markets:
         target: auto
-        threshold: 0.5%
+        threshold: 1%
         informational: false
         paths:
           - "markets"
           - "paychmgr"
       miner:
         target: auto
-        threshold: 0.5%
+        threshold: 1.5%
         informational: false
         paths:
           - "miner"
           - "storage"
       chain:
         target: auto
-        threshold: 0.5%
+        threshold: 1%
         informational: false
         paths:
           - "chain"
       node:
         target: auto
-        threshold: 0.5%
+        threshold: 1%
         informational: false
         paths:
           - "node"
@@ -50,8 +66,8 @@ coverage:
           - "journal"
       cli:
         target: auto
-        threshold: 0.5%
+        threshold: 1%
         informational: true
         paths:
           - "cli"
           - "cmd"
116  CHANGELOG.md
@@ -1,5 +1,121 @@
# Lotus changelog

# v1.11.3 / 2021-09-29

lotus v1.11.3 is a feature release that is **highly recommended for ALL lotus users to upgrade**, including node
operators, storage providers and clients. It includes many improvements and bug fixes that improve performance in
several areas, such as deal making and sealing.

## Highlights

- 🌟🌟Introduce `MaxStagingDealsBytes` - reject new deals if our staging deals area is full ([filecoin-project/lotus#7276](https://github.com/filecoin-project/lotus/pull/7276))
  - Set `MaxStagingDealsBytes` under the [Dealmaking] section of the markets subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of the market subsystem's repo gets too large. (A minimal sketch of this kind of check follows the Highlights list.)
- 🌟🌟miner: Command to list/remove expired sectors locally ([filecoin-project/lotus#7140](https://github.com/filecoin-project/lotus/pull/7140))
  - Run `./lotus-miner sectors expired -h` for more details.
- 🚀update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
  - MinerX fellows (early testers of lotus releases) have reported faster WindowPoSt computation!
- 🌟dealpublisher: Fully validate deals before publishing ([filecoin-project/lotus#7234](https://github.com/filecoin-project/lotus/pull/7234))
  - This excludes expired deals before sending out a PSD message, which reduces the chance of PSD message failure due to invalid deals.
- 🌟Simple alert system; FD limit alerts ([filecoin-project/lotus#7108](https://github.com/filecoin-project/lotus/pull/7108))
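To make the first highlight concrete, here is a minimal sketch, not lotus code, of the kind of guard `MaxStagingDealsBytes` enables: reject a new deal when the bytes already sitting in the staging directory plus the incoming piece would exceed the configured cap. The directory path, limit value and error text are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// stagingBytes walks dir and returns the total size of all regular files in it.
func stagingBytes(dir string) (int64, error) {
	var total int64
	err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Mode().IsRegular() {
			total += info.Size()
		}
		return nil
	})
	return total, err
}

// acceptDeal rejects an incoming deal if current staging usage plus the deal's
// piece size would exceed maxStagingDealsBytes (0 means "no limit").
func acceptDeal(stagingDir string, pieceSize, maxStagingDealsBytes int64) error {
	if maxStagingDealsBytes <= 0 {
		return nil
	}
	used, err := stagingBytes(stagingDir)
	if err != nil {
		return err
	}
	if used+pieceSize > maxStagingDealsBytes {
		return fmt.Errorf("cannot accept deal: staging area holds %d bytes, limit is %d", used, maxStagingDealsBytes)
	}
	return nil
}

func main() {
	// Hypothetical values: a 32 MiB piece against an 8 GiB staging cap.
	err := acceptDeal("/tmp/deal-staging", 32<<20, 8<<30)
	fmt.Println("accept:", err)
}
```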
## New Features

- feat(ci): include version/cli checks in tagged releases ([filecoin-project/lotus#7331](https://github.com/filecoin-project/lotus/pull/7331))
- Show deal sizes in sealing sectors ([filecoin-project/lotus#7261](https://github.com/filecoin-project/lotus/pull/7261))
- config for disabling NAT port mapping ([filecoin-project/lotus#7204](https://github.com/filecoin-project/lotus/pull/7204))
- Add optional mined block list to miner info ([filecoin-project/lotus#7202](https://github.com/filecoin-project/lotus/pull/7202))
- Shed: Create a verifreg command for when VRK isn't a multisig ([filecoin-project/lotus#7099](https://github.com/filecoin-project/lotus/pull/7099))

## Improvements

- build macOS CI ([filecoin-project/lotus#7307](https://github.com/filecoin-project/lotus/pull/7307))
- itests: remove cid equality comparison ([filecoin-project/lotus#7292](https://github.com/filecoin-project/lotus/pull/7292))
- Add partition info to the 'sectors status' command ([filecoin-project/lotus#7246](https://github.com/filecoin-project/lotus/pull/7246))
- chain: Cleanup consensus logic ([filecoin-project/lotus#7255](https://github.com/filecoin-project/lotus/pull/7255))
- builder: Handle chainstore config in ConfigFullNode ([filecoin-project/lotus#7232](https://github.com/filecoin-project/lotus/pull/7232))
- gateway: check tipsets in ChainGetPath ([filecoin-project/lotus#7230](https://github.com/filecoin-project/lotus/pull/7230))
- Refactor events subsystem ([filecoin-project/lotus#7000](https://github.com/filecoin-project/lotus/pull/7000))
- test: re-enable disabled tests ([filecoin-project/lotus#7211](https://github.com/filecoin-project/lotus/pull/7211))
- Reduce lotus-miner startup spam ([filecoin-project/lotus#7205](https://github.com/filecoin-project/lotus/pull/7205))
- Catch deal slashed because sector was terminated ([filecoin-project/lotus#7201](https://github.com/filecoin-project/lotus/pull/7201))
- Insert miner and network power data as gibibytes to avoid int64 overflows ([filecoin-project/lotus#7194](https://github.com/filecoin-project/lotus/pull/7194))
- sealing: Check piece CIDs after AddPiece ([filecoin-project/lotus#7185](https://github.com/filecoin-project/lotus/pull/7185))
- markets: OnDealExpiredOrSlashed - get deal by proposal instead of deal ID ([filecoin-project/lotus#5431](https://github.com/filecoin-project/lotus/pull/5431))
- Incoming: improve a log message ([filecoin-project/lotus#7181](https://github.com/filecoin-project/lotus/pull/7181))
- journal: make current log file have a fixed name (#7112) ([filecoin-project/lotus#7112](https://github.com/filecoin-project/lotus/pull/7112))
- call string.Repeat always with positive int ([filecoin-project/lotus#7104](https://github.com/filecoin-project/lotus/pull/7104))
- itests: support larger sector sizes; add large deal test. ([filecoin-project/lotus#7148](https://github.com/filecoin-project/lotus/pull/7148))
- Ignore nil throttler ([filecoin-project/lotus#7169](https://github.com/filecoin-project/lotus/pull/7169))

## Bug Fixes

- fix: escape periods to match actual periods in version
- fix bug for CommittedCapacitySectorLifetime ([filecoin-project/lotus#7337](https://github.com/filecoin-project/lotus/pull/7337))
- fix a panic in HandleRecoverDealIDs ([filecoin-project/lotus#7336](https://github.com/filecoin-project/lotus/pull/7336))
- fix index out of range ([filecoin-project/lotus#7273](https://github.com/filecoin-project/lotus/pull/7273))
- fix: correctly handle null blocks when detecting an expensive fork ([filecoin-project/lotus#7210](https://github.com/filecoin-project/lotus/pull/7210))
- fix: make lotus soup use the correct dependencies ([filecoin-project/lotus#7221](https://github.com/filecoin-project/lotus/pull/7221))
- fix: init restore adds empty storage.json ([filecoin-project/lotus#7025](https://github.com/filecoin-project/lotus/pull/7025))
- fix: disable broken testground integration test ([filecoin-project/lotus#7187](https://github.com/filecoin-project/lotus/pull/7187))
- fix TestDealPublisher ([filecoin-project/lotus#7173](https://github.com/filecoin-project/lotus/pull/7173))
- fix: make TestTimedCacheBlockstoreSimple pass reliably ([filecoin-project/lotus#7174](https://github.com/filecoin-project/lotus/pull/7174))
- Fix throttling bug ([filecoin-project/lotus#7177](https://github.com/filecoin-project/lotus/pull/7177))
- sealing: Fix sector state accounting with FinalizeEarly ([filecoin-project/lotus#7256](https://github.com/filecoin-project/lotus/pull/7256))
- docker entrypoint.sh missing variable escape character ([filecoin-project/lotus#7291](https://github.com/filecoin-project/lotus/pull/7291))
- sealing: Fix retry loop in SubmitCommitAggregate ([filecoin-project/lotus#7245](https://github.com/filecoin-project/lotus/pull/7245))
- sectors expired: Handle precommitted and unproven sectors correctly ([filecoin-project/lotus#7236](https://github.com/filecoin-project/lotus/pull/7236))
- stores: Fix reserved disk usage log spam ([filecoin-project/lotus#7233](https://github.com/filecoin-project/lotus/pull/7233))

## Dependency Updates

- github.com/filecoin-project/go-fil-markets (v1.8.1 -> v1.12.0)
- github.com/filecoin-project/go-data-transfer (v1.7.8 -> v1.10.1)
- update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
- fix(deps): use go-graphsync v0.9.3 with hotfix
- Update to unified go-graphsync v0.9.0 ([filecoin-project/lotus#7197](https://github.com/filecoin-project/lotus/pull/7197))

## Others

- v1.11.3-rc2 ([filecoin-project/lotus#7371](https://github.com/filecoin-project/lotus/pull/7371))
- v1.11.3-rc1 ([filecoin-project/lotus#7299](https://github.com/filecoin-project/lotus/pull/7299))
- Increase threshold from 0.5% to 1% ([filecoin-project/lotus#7262](https://github.com/filecoin-project/lotus/pull/7262))
- ci: exclude cruft from code coverage ([filecoin-project/lotus#7189](https://github.com/filecoin-project/lotus/pull/7189))
- Bump version to v1.11.3-dev ([filecoin-project/lotus#7180](https://github.com/filecoin-project/lotus/pull/7180))
- test: disable flaky TestBatchDealInput ([filecoin-project/lotus#7176](https://github.com/filecoin-project/lotus/pull/7176))
- Turn off patch ([filecoin-project/lotus#7172](https://github.com/filecoin-project/lotus/pull/7172))
- test: disable flaky TestSimultaneousTransferLimit ([filecoin-project/lotus#7153](https://github.com/filecoin-project/lotus/pull/7153))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @magik6k | 39 | +3311/-1825 | 179 |
| @Stebalien | 23 | +1935/-1417 | 84 |
| @dirkmc | 12 | +921/-732 | 111 |
| @dirkmc | 12 | +663/-790 | 30 |
| @hannahhoward | 3 | +482/-275 | 46 |
| @travisperson | 1 | +317/-65 | 5 |
| @jennijuju | 11 | +223/-126 | 24 |
| @hannahhoward | 7 | +257/-55 | 16 |
| @nonsense | 9 | +258/-37 | 19 |
| @raulk | 4 | +127/-36 | 13 |
| @raulk | 1 | +43/-60 | 15 |
| @arajasek | 4 | +74/-8 | 10 |
| @Frank | 2 | +68/-8 | 3 |
| @placer14 | 2 | +52/-1 | 4 |
| @ldoublewood | 2 | +15/-13 | 3 |
| @lanzafame | 1 | +16/-2 | 1 |
| @aarshkshah1992 | 2 | +11/-6 | 2 |
| @ZenGround0 | 2 | +7/-6 | 2 |
| @ognots | 1 | +0/-10 | 2 |
| @KAYUII | 2 | +4/-4 | 2 |
| @lanzafame | 1 | +6/-0 | 1 |
| @jacobheun | 1 | +3/-3 | 1 |
| @frank | 1 | +4/-0 | 1 |

# v1.11.2 / 2021-09-06

lotus v1.11.2 is a feature release that's **highly recommended ALL lotus users to upgrade**, including node operators,
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	apitypes "github.com/filecoin-project/lotus/api/types"
+	"github.com/filecoin-project/lotus/journal/alerting"
 
 	"github.com/google/uuid"
 
@@ -33,6 +34,10 @@ type Common interface {
 	LogList(context.Context) ([]string, error)         //perm:write
 	LogSetLevel(context.Context, string, string) error //perm:write
 
+	// LogAlerts returns list of all, active and inactive alerts tracked by the
+	// node
+	LogAlerts(ctx context.Context) ([]alerting.Alert, error) //perm:admin
+
 	// MethodGroup: Common
 
 	// Version provides information about API provider
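A hedged sketch of how a caller might use the new admin-only `LogAlerts` endpoint through any client that implements the `Common` interface shown above. How the client is obtained (e.g. over JSON-RPC) is out of scope here, and alert fields are printed generically rather than assuming the exact shape of `alerting.Alert`.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printAlerts lists every alert the node is currently tracking. `node` can be
// any value implementing the Common interface from the diff above; client
// construction is omitted.
func printAlerts(ctx context.Context, node api.Common) error {
	alerts, err := node.LogAlerts(ctx) // perm:admin, so the API token must carry admin rights
	if err != nil {
		return err
	}
	for _, a := range alerts {
		fmt.Printf("%+v\n", a) // printed generically; exact field names are not assumed
	}
	return nil
}
```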
@@ -33,6 +33,7 @@ type Gateway interface {
 	ChainHead(ctx context.Context) (*types.TipSet, error)
 	ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
 	ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
+	ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error)
 	ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
 	ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
 	ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
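A short, hedged sketch of calling the `ChainGetPath` method added to the Gateway interface above: it asks the gateway for the head changes needed to walk from one tipset to another. Obtaining `gw` (a gateway API client) is assumed and not shown.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// walkPath prints the apply/revert steps between two tipset keys.
func walkPath(ctx context.Context, gw api.Gateway, from, to types.TipSetKey) error {
	path, err := gw.ChainGetPath(ctx, from, to)
	if err != nil {
		return err
	}
	for _, hc := range path {
		// Each HeadChange carries a change type and the tipset involved.
		fmt.Println(hc.Type, hc.Val.Height())
	}
	return nil
}
```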
@@ -267,6 +267,11 @@ type SectorLog struct {
 	Message string
 }
 
+type SectorPiece struct {
+	Piece    abi.PieceInfo
+	DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
+}
+
 type SectorInfo struct {
 	SectorID abi.SectorNumber
 	State    SectorState
@@ -274,6 +279,7 @@ type SectorInfo struct {
 	CommR        *cid.Cid
 	Proof        []byte
 	Deals        []abi.DealID
+	Pieces       []SectorPiece
 	Ticket       SealTicket
 	Seed         SealSeed
 	PreCommitMsg *cid.Cid
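The new `Pieces` field is what lets tooling show deal sizes for sealing sectors (see the "Show deal sizes in sealing sectors" changelog entry). A hedged sketch of summing deal bytes in a sector, assuming only the fields visible in the diff above; how the `SectorInfo` is fetched (e.g. from the miner API) is not shown.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// dealBytes adds up the padded sizes of all pieces in a sector that belong to
// deals; filler pieces (DealInfo == nil) are skipped.
func dealBytes(info api.SectorInfo) uint64 {
	var total uint64
	for _, p := range info.Pieces {
		if p.DealInfo == nil {
			continue // filler piece, not a deal
		}
		total += uint64(p.Piece.Size) // abi.PaddedPieceSize is a uint64-based type
	}
	return total
}

func main() {
	var info api.SectorInfo // in practice this would come from the storage miner API
	fmt.Println("deal bytes in sector:", dealBytes(info))
}
```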
@@ -13,7 +13,6 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
-	"github.com/filecoin-project/go-multistore"
 	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-filestore"
@@ -90,7 +89,6 @@ func init() {
 	addExample(pid)
 	addExample(&pid)
 
-	multistoreIDExample := multistore.StoreID(50)
 	storeIDExample := imports.ID(50)
 
 	addExample(bitfield.NewFromSet([]uint64{5}))
@@ -124,8 +122,6 @@ func init() {
 	addExample(datatransfer.Ongoing)
 	addExample(storeIDExample)
 	addExample(&storeIDExample)
-	addExample(multistoreIDExample)
-	addExample(&multistoreIDExample)
 	addExample(retrievalmarket.ClientEventDealAccepted)
 	addExample(retrievalmarket.DealStatusNew)
 	addExample(network.ReachabilityPublic)
@@ -24,6 +24,7 @@ import (
 	apitypes "github.com/filecoin-project/lotus/api/types"
 	miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
+	alerting "github.com/filecoin-project/lotus/journal/alerting"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
 	dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
 	imports "github.com/filecoin-project/lotus/node/repo/imports"
@@ -995,6 +996,21 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
 }
 
+// LogAlerts mocks base method.
+func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "LogAlerts", arg0)
+	ret0, _ := ret[0].([]alerting.Alert)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// LogAlerts indicates an expected call of LogAlerts.
+func (mr *MockFullNodeMockRecorder) LogAlerts(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogAlerts", reflect.TypeOf((*MockFullNode)(nil).LogAlerts), arg0)
+}
+
 // LogList mocks base method.
 func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
 	m.ctrl.T.Helper()
@@ -27,6 +27,7 @@ import (
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/journal/alerting"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo/imports"
@@ -63,6 +64,8 @@ type CommonStruct struct {
 
 		Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"`
 
+		LogAlerts func(p0 context.Context) ([]alerting.Alert, error) `perm:"admin"`
+
 		LogList func(p0 context.Context) ([]string, error) `perm:"write"`
 
 		LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"`
@@ -477,6 +480,8 @@ type GatewayStruct struct {
 
 		ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
 
+		ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) ``
+
 		ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
 
 		ChainGetTipSetAfterHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) ``
@@ -946,6 +951,17 @@ func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, err
 	return *new(apitypes.OpenRPCDocument), ErrNotSupported
 }
 
+func (s *CommonStruct) LogAlerts(p0 context.Context) ([]alerting.Alert, error) {
+	if s.Internal.LogAlerts == nil {
+		return *new([]alerting.Alert), ErrNotSupported
+	}
+	return s.Internal.LogAlerts(p0)
+}
+
+func (s *CommonStub) LogAlerts(p0 context.Context) ([]alerting.Alert, error) {
+	return *new([]alerting.Alert), ErrNotSupported
+}
+
 func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) {
 	if s.Internal.LogList == nil {
 		return *new([]string), ErrNotSupported
@@ -3025,6 +3041,17 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me
 	return nil, ErrNotSupported
 }
 
+func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
+	if s.Internal.ChainGetPath == nil {
+		return *new([]*HeadChange), ErrNotSupported
+	}
+	return s.Internal.ChainGetPath(p0, p1, p2)
+}
+
+func (s *GatewayStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
+	return *new([]*HeadChange), ErrNotSupported
+}
+
 func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
 	if s.Internal.ChainGetTipSet == nil {
 		return nil, ErrNotSupported
@@ -23,6 +23,7 @@ import (
 	apitypes "github.com/filecoin-project/lotus/api/types"
 	miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
+	alerting "github.com/filecoin-project/lotus/journal/alerting"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
 	dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
 	imports "github.com/filecoin-project/lotus/node/repo/imports"
@@ -950,6 +951,21 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
 }
 
+// LogAlerts mocks base method.
+func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "LogAlerts", arg0)
+	ret0, _ := ret[0].([]alerting.Alert)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// LogAlerts indicates an expected call of LogAlerts.
+func (mr *MockFullNodeMockRecorder) LogAlerts(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogAlerts", reflect.TypeOf((*MockFullNode)(nil).LogAlerts), arg0)
+}
+
 // LogList mocks base method.
 func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
 	m.ctrl.T.Helper()
@@ -47,8 +47,12 @@ func (t *TimedCacheBlockstore) Start(_ context.Context) error {
 		return fmt.Errorf("already started")
 	}
 	t.closeCh = make(chan struct{})
+
+	// Create this timer before starting the goroutine. Otherwise, creating the timer will race
+	// with adding time to the mock clock, and we could add time _first_, then stall waiting for
+	// a timer that'll never fire.
+	ticker := t.clock.Ticker(t.interval)
 	go func() {
-		ticker := t.clock.Ticker(t.interval)
 		defer ticker.Stop()
 		for {
 			select {
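The same race-avoidance pattern in miniature, using the standard library clock rather than the mocked clock from the diff (the names and durations here are illustrative, not lotus code): create the ticker before launching the goroutine, so nothing can advance or observe time between the two steps.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Create the ticker first, in the calling goroutine...
	ticker := time.NewTicker(100 * time.Millisecond)
	done := make(chan struct{})

	// ...then hand it to the background goroutine. With a mocked clock this
	// ordering guarantees the timer exists before any test advances time.
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Println("tick")
			case <-done:
				return
			}
		}
	}()

	time.Sleep(350 * time.Millisecond)
	close(done)
}
```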
6  build/limits.go  (new file)
@@ -0,0 +1,6 @@
package build

var (
	DefaultFDLimit uint64 = 16 << 10
	MinerFDLimit   uint64 = 100_000
)
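These limits feed the new FD-limit alert (see the "Simple alert system; FD limit alerts" changelog entry). A hedged sketch of the kind of check such an alert can be based on, using only the standard library; the actual lotus wiring into the alerting subsystem is not shown here.

```go
package main

import (
	"fmt"
	"syscall"
)

// MinerFDLimit mirrors the value defined in build/limits.go above.
const MinerFDLimit uint64 = 100_000

func main() {
	var rlim syscall.Rlimit
	// RLIMIT_NOFILE is the per-process cap on open file descriptors.
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
		fmt.Println("getrlimit:", err)
		return
	}
	if rlim.Cur < MinerFDLimit {
		// In lotus this condition would raise an alert; here we just report it.
		fmt.Printf("soft FD limit %d is below the recommended %d\n", rlim.Cur, MinerFDLimit)
		return
	}
	fmt.Printf("FD limit ok: %d\n", rlim.Cur)
}
```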
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,3 +1,4 @@
+//go:build debug || 2k
 // +build debug 2k
 
 package build
@@ -1,3 +1,4 @@
+//go:build butterflynet
 // +build butterflynet
 
 package build
@@ -1,3 +1,4 @@
+//go:build calibnet
 // +build calibnet
 
 package build
@@ -1,3 +1,4 @@
+//go:build debug
 // +build debug
 
 package build
@@ -1,3 +1,4 @@
+//go:build interopnet
 // +build interopnet
 
 package build
@@ -1,10 +1,5 @@
-// +build !debug
-// +build !2k
-// +build !testground
-// +build !calibnet
-// +build !nerpanet
-// +build !butterflynet
-// +build !interopnet
+//go:build !debug && !2k && !testground && !calibnet && !nerpanet && !butterflynet && !interopnet
+// +build !debug,!2k,!testground,!calibnet,!nerpanet,!butterflynet,!interopnet
 
 package build
 
@@ -1,3 +1,4 @@
+//go:build nerpanet
 // +build nerpanet
 
 package build
@@ -1,3 +1,4 @@
+//go:build !testground
 // +build !testground
 
 package build
@@ -1,3 +1,4 @@
+//go:build testground
 // +build testground
 
 // This file makes hardcoded parameters (const) configurable as vars.
@@ -1,4 +1,5 @@
-//+build tools
+//go:build tools
+// +build tools
 
 package build
 
@@ -40,7 +40,7 @@ func buildType() string {
 }
 
 // BuildVersion is the local build version
-const BuildVersion = "1.11.2"
+const BuildVersion = "1.11.3"
 
 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
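This version bump is what the new "check tag and version output match" CI step guards. The sketch below is not the `scripts/version-check.sh` script itself; it is a hedged illustration of that kind of check, where the tag source (`CIRCLE_TAG`) and the comparison rules are assumptions.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// BuildVersion stands in for build.BuildVersion ("1.11.3" after this diff).
const BuildVersion = "1.11.3"

func main() {
	// In CI the tag would come from the environment, e.g. CIRCLE_TAG=v1.11.3 or v1.11.3-rc1.
	tag := os.Getenv("CIRCLE_TAG")
	// Strip the leading "v" and any -rcN suffix before comparing to the build version.
	ver := strings.TrimPrefix(tag, "v")
	if i := strings.Index(ver, "-rc"); i >= 0 {
		ver = ver[:i]
	}
	if ver != BuildVersion {
		fmt.Printf("tag %q does not match build version %q\n", tag, BuildVersion)
		os.Exit(1)
	}
	fmt.Println("tag and version match")
}
```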
@@ -141,11 +141,28 @@ type Deadline interface {
 }
 
 type Partition interface {
+	// AllSectors returns all sector numbers in this partition, including faulty, unproven, and terminated sectors
 	AllSectors() (bitfield.BitField, error)
+
+	// Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt).
+	// Faults ∩ Terminated = ∅
 	FaultySectors() (bitfield.BitField, error)
+
+	// Subset of faulty sectors expected to recover on next PoSt
+	// Recoveries ∩ Terminated = ∅
 	RecoveringSectors() (bitfield.BitField, error)
+
+	// Live sectors are those that are not terminated (but may be faulty).
 	LiveSectors() (bitfield.BitField, error)
+
+	// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power.
 	ActiveSectors() (bitfield.BitField, error)
+
+	// Unproven sectors in this partition. This bitfield will be cleared on
+	// a successful window post (or at the end of the partition's next
+	// deadline). At that time, any still unproven sectors will be added to
+	// the faulty sector bitfield.
+	UnprovenSectors() (bitfield.BitField, error)
 }
 
 type SectorOnChainInfo struct {
@@ -200,11 +200,28 @@ type Deadline interface {
 }
 
 type Partition interface {
+	// AllSectors returns all sector numbers in this partition, including faulty, unproven, and terminated sectors
 	AllSectors() (bitfield.BitField, error)
+
+	// Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt).
+	// Faults ∩ Terminated = ∅
 	FaultySectors() (bitfield.BitField, error)
+
+	// Subset of faulty sectors expected to recover on next PoSt
+	// Recoveries ∩ Terminated = ∅
 	RecoveringSectors() (bitfield.BitField, error)
+
+	// Live sectors are those that are not terminated (but may be faulty).
 	LiveSectors() (bitfield.BitField, error)
+
+	// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power.
 	ActiveSectors() (bitfield.BitField, error)
+
+	// Unproven sectors in this partition. This bitfield will be cleared on
+	// a successful window post (or at the end of the partition's next
+	// deadline). At that time, any still unproven sectors will be added to
+	// the faulty sector bitfield.
+	UnprovenSectors() (bitfield.BitField, error)
 }
 
 type SectorOnChainInfo struct {
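To make the relationship between these bitfields concrete, here is a hedged sketch (not lotus code; it assumes only the go-bitfield `SubtractBitField` helper) of deriving "active" sectors from the live, faulty and unproven sets, mirroring the comment on `ActiveSectors` above.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

// activeSectors computes live \ faulty \ unproven, i.e. sectors that are
// neither terminated nor faulty nor unproven.
func activeSectors(live, faulty, unproven bitfield.BitField) (bitfield.BitField, error) {
	nonFaulty, err := bitfield.SubtractBitField(live, faulty)
	if err != nil {
		return bitfield.BitField{}, err
	}
	return bitfield.SubtractBitField(nonFaulty, unproven)
}

func main() {
	live := bitfield.NewFromSet([]uint64{1, 2, 3, 4, 5})
	faulty := bitfield.NewFromSet([]uint64{2})
	unproven := bitfield.NewFromSet([]uint64{5})

	active, err := activeSectors(live, faulty, unproven)
	if err != nil {
		panic(err)
	}
	n, _ := active.Count()
	fmt.Println("active sectors:", n) // expect 3 (sectors 1, 3 and 4)
}
```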
@@ -549,6 +549,10 @@ func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) {
+	return {{if (ge .v 2)}}p.Partition.Unproven{{else}}bitfield.New(){{end}}, nil
+}
+
 func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
 	{{if (ge .v 2)}}
 	return SectorOnChainInfo{
@@ -500,6 +500,10 @@ func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition0) UnprovenSectors() (bitfield.BitField, error) {
+	return bitfield.New(), nil
+}
+
 func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
 
 	return (SectorOnChainInfo)(v0)
@@ -530,6 +530,10 @@ func (p *partition2) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition2) UnprovenSectors() (bitfield.BitField, error) {
+	return p.Partition.Unproven, nil
+}
+
 func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
 
 	return SectorOnChainInfo{
@@ -531,6 +531,10 @@ func (p *partition3) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition3) UnprovenSectors() (bitfield.BitField, error) {
+	return p.Partition.Unproven, nil
+}
+
 func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
 
 	return SectorOnChainInfo{
@@ -531,6 +531,10 @@ func (p *partition4) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition4) UnprovenSectors() (bitfield.BitField, error) {
+	return p.Partition.Unproven, nil
+}
+
 func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
 
 	return SectorOnChainInfo{
@@ -531,6 +531,10 @@ func (p *partition5) RecoveringSectors() (bitfield.BitField, error) {
 	return p.Partition.Recoveries, nil
 }
 
+func (p *partition5) UnprovenSectors() (bitfield.BitField, error) {
+	return p.Partition.Unproven, nil
+}
+
 func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
 
 	return SectorOnChainInfo{
@@ -177,6 +177,11 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
 		// TODO handle genesis better
 		return nil
 	}
+
+	if curr.Round != prev.Round+1 {
+		return xerrors.Errorf("invalid beacon entry: cur (%d) != prev (%d) + 1", curr.Round, prev.Round)
+	}
+
 	if be := db.getCachedValue(curr.Round); be != nil {
 		if !bytes.Equal(curr.Data, be.Data) {
 			return xerrors.New("invalid beacon value, does not match cached good value")
297  chain/consensus/filcns/compute_state.go  (new file)
@@ -0,0 +1,297 @@
package filcns

import (
	"context"
	"sync/atomic"

	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"

	exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
	exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
	exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
	exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
	exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
	"github.com/filecoin-project/lotus/metrics"
)

func NewActorRegistry() *vm.ActorRegistry {
	inv := vm.NewActorRegistry()

	// TODO: define all these properties on the actors themselves, in specs-actors.

	inv.Register(vm.ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
	inv.Register(vm.ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
	inv.Register(vm.ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
	inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
	inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)

	return inv
}

type TipSetExecutor struct{}

func NewTipSetExecutor() *TipSetExecutor {
	return &TipSetExecutor{}
}

func (t *TipSetExecutor) NewActorRegistry() *vm.ActorRegistry {
	return NewActorRegistry()
}

type FilecoinBlockMessages struct {
	store.BlockMessages

	WinCount int64
}

func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []FilecoinBlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
	done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
	defer done()

	partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
	defer func() {
		partDone()
	}()

	makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
		vmopt := &vm.VMOpts{
			StateBase:      base,
			Epoch:          epoch,
			Rand:           r,
			Bstore:         sm.ChainStore().StateBlockstore(),
			Actors:         NewActorRegistry(),
			Syscalls:       sm.Syscalls,
			CircSupplyCalc: sm.GetVMCirculatingSupply,
			NtwkVersion:    sm.GetNtwkVersion,
			BaseFee:        baseFee,
			LookbackState:  stmgr.LookbackStateGetterForTipset(sm, ts),
		}

		return sm.VMConstructor()(ctx, vmopt)
	}

	vmi, err := makeVmWithBaseState(pstate)
	if err != nil {
		return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
	}

	runCron := func(epoch abi.ChainEpoch) error {
		cronMsg := &types.Message{
			To:         cron.Address,
			From:       builtin.SystemActorAddr,
			Nonce:      uint64(epoch),
			Value:      types.NewInt(0),
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
			GasLimit:   build.BlockGasLimit * 10000, // Make super sure this is never too little
			Method:     cron.Methods.EpochTick,
			Params:     nil,
		}
		ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
		if err != nil {
			return err
		}
		if em != nil {
			if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
				return xerrors.Errorf("callback failed on cron message: %w", err)
			}
		}
		if ret.ExitCode != 0 {
			return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
		}

		return nil
	}

	for i := parentEpoch; i < epoch; i++ {
		if i > parentEpoch {
			// run cron for null rounds if any
			if err := runCron(i); err != nil {
				return cid.Undef, cid.Undef, err
			}

			pstate, err = vmi.Flush(ctx)
			if err != nil {
				return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
			}
		}

		// handle state forks
		// XXX: The state tree
		newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts)
		if err != nil {
			return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
		}

		if pstate != newState {
			vmi, err = makeVmWithBaseState(newState)
			if err != nil {
				return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
			}
		}

		vmi.SetBlockHeight(i + 1)
		pstate = newState
	}

	partDone()
	partDone = metrics.Timer(ctx, metrics.VMApplyMessages)

	var receipts []cbg.CBORMarshaler
	processedMsgs := make(map[cid.Cid]struct{})
	for _, b := range bms {
		penalty := types.NewInt(0)
		gasReward := big.Zero()

		for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
			m := cm.VMMessage()
			if _, found := processedMsgs[m.Cid()]; found {
				continue
			}
			r, err := vmi.ApplyMessage(ctx, cm)
			if err != nil {
				return cid.Undef, cid.Undef, err
			}

			receipts = append(receipts, &r.MessageReceipt)
			gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
			penalty = big.Add(penalty, r.GasCosts.MinerPenalty)

			if em != nil {
				if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
					return cid.Undef, cid.Undef, err
				}
			}
			processedMsgs[m.Cid()] = struct{}{}
		}

		params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
			Miner:     b.Miner,
			Penalty:   penalty,
			GasReward: gasReward,
			WinCount:  b.WinCount,
		})
		if err != nil {
			return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
		}

		rwMsg := &types.Message{
			From:       builtin.SystemActorAddr,
			To:         reward.Address,
			Nonce:      uint64(epoch),
			Value:      types.NewInt(0),
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
			GasLimit:   1 << 30,
			Method:     reward.Methods.AwardBlockReward,
			Params:     params,
		}
		ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
		if actErr != nil {
			return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
		}
		if em != nil {
			if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
				return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
			}
		}

		if ret.ExitCode != 0 {
			return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
		}
	}

	partDone()
	partDone = metrics.Timer(ctx, metrics.VMApplyCron)

	if err := runCron(epoch); err != nil {
		return cid.Cid{}, cid.Cid{}, err
	}

	partDone()
	partDone = metrics.Timer(ctx, metrics.VMApplyFlush)

	rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx))
	for i, receipt := range receipts {
		if err := rectarr.Set(uint64(i), receipt); err != nil {
			return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
		}
	}
	rectroot, err := rectarr.Root()
	if err != nil {
		return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
	}

	st, err := vmi.Flush(ctx)
	if err != nil {
		return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
	}

	stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
		metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))

	return st, rectroot, nil
}

func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) {
	ctx, span := trace.StartSpan(ctx, "computeTipSetState")
	defer span.End()

	blks := ts.Blocks()

	for i := 0; i < len(blks); i++ {
		for j := i + 1; j < len(blks); j++ {
			if blks[i].Miner == blks[j].Miner {
				return cid.Undef, cid.Undef,
					xerrors.Errorf("duplicate miner in a tipset (%s %s)",
						blks[i].Miner, blks[j].Miner)
			}
		}
	}

	var parentEpoch abi.ChainEpoch
	pstate := blks[0].ParentStateRoot
	if blks[0].Height > 0 {
		parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0])
		if err != nil {
			return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
		}

		parentEpoch = parent.Height
	}

	r := store.NewChainRand(sm.ChainStore(), ts.Cids())

	blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts)
	if err != nil {
		return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
	}
	fbmsgs := make([]FilecoinBlockMessages, len(blkmsgs))
	for i := range fbmsgs {
		fbmsgs[i].BlockMessages = blkmsgs[i]
		fbmsgs[i].WinCount = ts.Blocks()[i].ElectionProof.WinCount
	}
	baseFee := blks[0].ParentBaseFee

	return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, fbmsgs, blks[0].Height, r, em, baseFee, ts)
}

var _ stmgr.Executor = &TipSetExecutor{}
847
chain/consensus/filcns/filecoin.go
Normal file
847
chain/consensus/filcns/filecoin.go
Normal file
@ -0,0 +1,847 @@
package filcns

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/hashicorp/go-multierror"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	logging "github.com/ipfs/go-log/v2"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	cbg "github.com/whyrusleeping/cbor-gen"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/network"
	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/beacon"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/lib/async"
	"github.com/filecoin-project/lotus/lib/sigs"
	"github.com/filecoin-project/lotus/metrics"
)

var log = logging.Logger("fil-consensus")

type FilecoinEC struct {
	// The interface for accessing and putting tipsets into local storage
	store *store.ChainStore

	// handle to the random beacon for verification
	beacon beacon.Schedule

	// the state manager handles making state queries
	sm *stmgr.StateManager

	verifier ffiwrapper.Verifier

	genesis *types.TipSet
}

// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5

func NewFilecoinExpectedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier ffiwrapper.Verifier, genesis chain.Genesis) consensus.Consensus {
	if build.InsecurePoStValidation {
		log.Warn("*********************************************************************************************")
		log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
		log.Warn("*********************************************************************************************")
	}

	return &FilecoinEC{
		store:    sm.ChainStore(),
		beacon:   beacon,
		sm:       sm,
		verifier: verifier,
		genesis:  genesis,
	}
}

func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
	if err := blockSanityChecks(b.Header); err != nil {
		return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
	}

	h := b.Header

	baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
	if err != nil {
		return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
	}

	winPoStNv := filec.sm.GetNtwkVersion(ctx, baseTs.Height())

	lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, h.Height)
	if err != nil {
		return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
	}

	prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs)
	if err != nil {
		return xerrors.Errorf("failed to get latest beacon entry: %w", err)
	}

	// fast checks first
	if h.Height <= baseTs.Height() {
		return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
	}

	nulls := h.Height - (baseTs.Height() + 1)
	if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
		return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
	}

	now := uint64(build.Clock.Now().Unix())
	if h.Timestamp > now+build.AllowableClockDriftSecs {
		return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, consensus.ErrTemporal)
	}
	if h.Timestamp > now {
		log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
	}

	msgsCheck := async.Err(func() error {
		if b.Cid() == build.WhitelistedBlock {
			return nil
		}

		if err := filec.checkBlockMessages(ctx, b, baseTs); err != nil {
			return xerrors.Errorf("block had invalid messages: %w", err)
		}
		return nil
	})

	minerCheck := async.Err(func() error {
		if err := filec.minerIsValid(ctx, h.Miner, baseTs); err != nil {
			return xerrors.Errorf("minerIsValid failed: %w", err)
		}
		return nil
	})

	baseFeeCheck := async.Err(func() error {
		baseFee, err := filec.store.ComputeBaseFee(ctx, baseTs)
		if err != nil {
			return xerrors.Errorf("computing base fee: %w", err)
		}
		if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
			return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
				b.Header.ParentBaseFee, baseFee)
		}
		return nil
	})
	pweight, err := filec.store.Weight(ctx, baseTs)
	if err != nil {
		return xerrors.Errorf("getting parent weight: %w", err)
	}

	if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
		return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
			b.Header.ParentWeight, pweight)
	}

	stateRootCheck := async.Err(func() error {
		stateroot, precp, err := filec.sm.TipSetState(ctx, baseTs)
		if err != nil {
			return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
		}

		if stateroot != h.ParentStateRoot {
			msgs, err := filec.store.MessagesForTipset(baseTs)
			if err != nil {
				log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
			} else {
				log.Warn("Messages for tipset with mismatching state:")
				for i, m := range msgs {
					mm := m.VMMessage()
					log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
				}
			}

			return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
		}

		if precp != h.ParentMessageReceipts {
			return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
		}

		return nil
	})

	// Stuff that needs worker address
	waddr, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, h.Miner)
	if err != nil {
		return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
	}

	winnerCheck := async.Err(func() error {
		if h.ElectionProof.WinCount < 1 {
			return xerrors.Errorf("block is not claiming to be a winner")
		}

		eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, h.Miner, baseTs, lbts)
		if err != nil {
			return xerrors.Errorf("determining if miner has min power failed: %w", err)
		}

		if !eligible {
			return xerrors.New("block's miner is ineligible to mine")
		}

		rBeacon := *prevBeacon
		if len(h.BeaconEntries) != 0 {
			rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
		}
		buf := new(bytes.Buffer)
		if err := h.Miner.MarshalCBOR(buf); err != nil {
			return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
		}

		vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
		if err != nil {
			return xerrors.Errorf("could not draw randomness: %w", err)
		}

		if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
			return xerrors.Errorf("validating block election proof failed: %w", err)
		}

		slashed, err := stmgr.GetMinerSlashed(ctx, filec.sm, baseTs, h.Miner)
		if err != nil {
			return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
		}

		if slashed {
			return xerrors.Errorf("received block was from slashed or invalid miner")
		}

		mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, filec.sm, lbst, h.Miner)
		if err != nil {
			return xerrors.Errorf("failed getting power: %w", err)
		}

		j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
		if h.ElectionProof.WinCount != j {
			return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
		}

		return nil
	})

	blockSigCheck := async.Err(func() error {
		if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
			return xerrors.Errorf("check block signature failed: %w", err)
		}
		return nil
	})

	beaconValuesCheck := async.Err(func() error {
		if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
			return nil
		}

		if err := beacon.ValidateBlockValues(filec.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
			return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
		}
		return nil
	})

	tktsCheck := async.Err(func() error {
		buf := new(bytes.Buffer)
		if err := h.Miner.MarshalCBOR(buf); err != nil {
			return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
		}

		if h.Height > build.UpgradeSmokeHeight {
			buf.Write(baseTs.MinTicket().VRFProof)
		}

		beaconBase := *prevBeacon
		if len(h.BeaconEntries) != 0 {
			beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
		}

		vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
		if err != nil {
			return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
		}

		err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
		if err != nil {
			return xerrors.Errorf("validating block tickets failed: %w", err)
		}
		return nil
	})

	wproofCheck := async.Err(func() error {
		if err := filec.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
			return xerrors.Errorf("invalid election post: %w", err)
		}
		return nil
	})

	await := []async.ErrorFuture{
		minerCheck,
		tktsCheck,
		blockSigCheck,
		beaconValuesCheck,
		wproofCheck,
		winnerCheck,
		msgsCheck,
		baseFeeCheck,
		stateRootCheck,
	}

	var merr error
	for _, fut := range await {
		if err := fut.AwaitContext(ctx); err != nil {
			merr = multierror.Append(merr, err)
		}
	}
	if merr != nil {
		mulErr := merr.(*multierror.Error)
		mulErr.ErrorFormat = func(es []error) string {
			if len(es) == 1 {
				return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
			}

			points := make([]string, len(es))
			for i, err := range es {
				points[i] = fmt.Sprintf("* %+v", err)
			}

			return fmt.Sprintf(
				"%d errors occurred:\n\t%s\n\n",
				len(es), strings.Join(points, "\n\t"))
		}
		return mulErr
	}

	return nil
}

func blockSanityChecks(h *types.BlockHeader) error {
	if h.ElectionProof == nil {
		return xerrors.Errorf("block cannot have nil election proof")
	}

	if h.Ticket == nil {
		return xerrors.Errorf("block cannot have nil ticket")
	}

	if h.BlockSig == nil {
		return xerrors.Errorf("block had nil signature")
	}

	if h.BLSAggregate == nil {
		return xerrors.Errorf("block had nil bls aggregate signature")
	}

	if h.Miner.Protocol() != address.ID {
		return xerrors.Errorf("block had non-ID miner address")
	}

	return nil
}

func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
	if build.InsecurePoStValidation {
		if len(h.WinPoStProof) == 0 {
			return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
		}

		if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
			return nil
		}
		return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
	}

	buf := new(bytes.Buffer)
	if err := h.Miner.MarshalCBOR(buf); err != nil {
		return xerrors.Errorf("failed to marshal miner address: %w", err)
	}

	rbase := prevBeacon
	if len(h.BeaconEntries) > 0 {
		rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
	}

	rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
	if err != nil {
		return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
	}

	mid, err := address.IDFromAddress(h.Miner)
	if err != nil {
		return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
	}

	sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
	if err != nil {
		return xerrors.Errorf("getting winning post sector set: %w", err)
	}

	ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
		Randomness:        rand,
		Proofs:            h.WinPoStProof,
		ChallengedSectors: sectors,
		Prover:            abi.ActorID(mid),
	})
	if err != nil {
		return xerrors.Errorf("failed to verify election post: %w", err)
	}

	if !ok {
		log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
		return xerrors.Errorf("winning post was invalid")
	}

	return nil
}

// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
	{
		var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
		var pubks [][]byte

		for _, m := range b.BlsMessages {
			sigCids = append(sigCids, m.Cid())

			pubk, err := filec.sm.GetBlsPublicKey(ctx, m.From, baseTs)
			if err != nil {
				return xerrors.Errorf("failed to load bls public to validate block: %w", err)
			}

			pubks = append(pubks, pubk)
		}

		if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
			return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
		}
	}

	nonces := make(map[address.Address]uint64)

	stateroot, _, err := filec.sm.TipSetState(ctx, baseTs)
	if err != nil {
		return err
	}

	st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot)
	if err != nil {
		return xerrors.Errorf("failed to load base state tree: %w", err)
	}

	nv := filec.sm.GetNtwkVersion(ctx, b.Header.Height)
	pl := vm.PricelistByEpoch(baseTs.Height())
	var sumGasLimit int64
	checkMsg := func(msg types.ChainMsg) error {
		m := msg.VMMessage()

		// Phase 1: syntactic validation, as defined in the spec
		minGas := pl.OnChainMessage(msg.ChainLength())
		if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
			return err
		}

		// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
		// So below is overflow safe
		sumGasLimit += m.GasLimit
		if sumGasLimit > build.BlockGasLimit {
			return xerrors.Errorf("block gas limit exceeded")
		}

		// Phase 2: (Partial) semantic validation:
		// the sender exists and is an account actor, and the nonces make sense
		var sender address.Address
		if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
			sender, err = st.LookupID(m.From)
			if err != nil {
				return err
			}
		} else {
			sender = m.From
		}

		if _, ok := nonces[sender]; !ok {
			// `GetActor` does not validate that this is an account actor.
			act, err := st.GetActor(sender)
			if err != nil {
				return xerrors.Errorf("failed to get actor: %w", err)
			}

			if !builtin.IsAccountActor(act.Code) {
				return xerrors.New("Sender must be an account actor")
			}
			nonces[sender] = act.Nonce
		}

		if nonces[sender] != m.Nonce {
			return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
		}
		nonces[sender]++

		return nil
	}

	// Validate message arrays in a temporary blockstore.
	tmpbs := bstore.NewMemory()
	tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))

	bmArr := blockadt.MakeEmptyArray(tmpstore)
	for i, m := range b.BlsMessages {
		if err := checkMsg(m); err != nil {
			return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
		}

		c, err := store.PutMessage(tmpbs, m)
		if err != nil {
			return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
		}

		k := cbg.CborCid(c)
		if err := bmArr.Set(uint64(i), &k); err != nil {
			return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
		}
	}

	smArr := blockadt.MakeEmptyArray(tmpstore)
	for i, m := range b.SecpkMessages {
		if err := checkMsg(m); err != nil {
			return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
		}

		// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
		// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
		kaddr, err := filec.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
		if err != nil {
			return xerrors.Errorf("failed to resolve key addr: %w", err)
		}

		if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
			return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
		}

		c, err := store.PutMessage(tmpbs, m)
		if err != nil {
			return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
		}
		k := cbg.CborCid(c)
		if err := smArr.Set(uint64(i), &k); err != nil {
			return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
		}
	}

	bmroot, err := bmArr.Root()
	if err != nil {
		return err
	}

	smroot, err := smArr.Root()
	if err != nil {
		return err
	}

	mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
		BlsMessages:   bmroot,
		SecpkMessages: smroot,
	})
	if err != nil {
		return err
	}

	if b.Header.Messages != mrcid {
		return fmt.Errorf("messages didnt match message root in header")
	}

	// Finally, flush.
	return vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
}

func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
	if filec.genesis == nil {
		return false
	}

	now := uint64(build.Clock.Now().Unix())
	return epoch > (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}

func (filec *FilecoinEC) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
	act, err := filec.sm.LoadActor(ctx, power.Address, baseTs)
	if err != nil {
		return xerrors.Errorf("failed to load power actor: %w", err)
	}

	powState, err := power.Load(filec.store.ActorStore(ctx), act)
	if err != nil {
		return xerrors.Errorf("failed to load power actor state: %w", err)
	}

	_, exist, err := powState.MinerPower(maddr)
	if err != nil {
		return xerrors.Errorf("failed to look up miner's claim: %w", err)
	}

	if !exist {
		return xerrors.New("miner isn't valid")
	}

	return nil
}

func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
	return VerifyVRF(ctx, worker, rand, evrf)
}

func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error {
	_, span := trace.StartSpan(ctx, "VerifyVRF")
	defer span.End()

	sig := &crypto.Signature{
		Type: crypto.SigTypeBLS,
		Data: vrfproof,
	}

	if err := sigs.Verify(sig, worker, vrfBase); err != nil {
		return xerrors.Errorf("vrf was invalid: %w", err)
	}

	return nil
}

var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")

func (filec *FilecoinEC) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
	if self {
		return filec.validateLocalBlock(ctx, msg)
	}

	// track validation time
	begin := build.Clock.Now()
	defer func() {
		log.Debugf("block validation time: %s", build.Clock.Since(begin))
	}()

	stats.Record(ctx, metrics.BlockReceived.M(1))

	recordFailureFlagPeer := func(what string) {
		// bv.Validate will flag the peer in that case
		panic(what)
	}

	blk, what, err := filec.decodeAndCheckBlock(msg)
	if err != nil {
		log.Error("got invalid block over pubsub: ", err)
		recordFailureFlagPeer(what)
		return pubsub.ValidationReject, what
	}

	// validate the block meta: the Message CID in the header must match the included messages
	err = filec.validateMsgMeta(ctx, blk)
	if err != nil {
		log.Warnf("error validating message metadata: %s", err)
		recordFailureFlagPeer("invalid_block_meta")
		return pubsub.ValidationReject, "invalid_block_meta"
	}

	reject, err := filec.validateBlockHeader(ctx, blk.Header)
	if err != nil {
		if reject == "" {
			log.Warn("ignoring block msg: ", err)
			return pubsub.ValidationIgnore, reject
		}
		recordFailureFlagPeer(reject)
		return pubsub.ValidationReject, reject
	}

	// all good, accept the block
	msg.ValidatorData = blk
	stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
	return pubsub.ValidationAccept, ""
}

func (filec *FilecoinEC) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
	stats.Record(ctx, metrics.BlockPublished.M(1))

	if size := msg.Size(); size > 1<<20-1<<15 {
		log.Errorf("ignoring oversize block (%dB)", size)
		return pubsub.ValidationIgnore, "oversize_block"
	}

	blk, what, err := filec.decodeAndCheckBlock(msg)
	if err != nil {
		log.Errorf("got invalid local block: %s", err)
		return pubsub.ValidationIgnore, what
	}

	msg.ValidatorData = blk
	stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
	return pubsub.ValidationAccept, ""
}

func (filec *FilecoinEC) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
	blk, err := types.DecodeBlockMsg(msg.GetData())
	if err != nil {
		return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
	}

	if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
		return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
	}

	// make sure we have a signature
	if blk.Header.BlockSig == nil {
		return nil, "missing_signature", fmt.Errorf("block without a signature")
	}

	return blk, "", nil
}

func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
	// TODO there has to be a simpler way to do this without the blockstore dance
	// block headers use adt0
	store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))
	bmArr := blockadt.MakeEmptyArray(store)
	smArr := blockadt.MakeEmptyArray(store)

	for i, m := range msg.BlsMessages {
		c := cbg.CborCid(m)
		if err := bmArr.Set(uint64(i), &c); err != nil {
			return err
		}
	}

	for i, m := range msg.SecpkMessages {
		c := cbg.CborCid(m)
		if err := smArr.Set(uint64(i), &c); err != nil {
			return err
		}
	}

	bmroot, err := bmArr.Root()
	if err != nil {
		return err
	}

	smroot, err := smArr.Root()
	if err != nil {
		return err
	}

	mrcid, err := store.Put(store.Context(), &types.MsgMeta{
		BlsMessages:   bmroot,
		SecpkMessages: smroot,
	})

	if err != nil {
		return err
	}

	if msg.Header.Messages != mrcid {
		return fmt.Errorf("messages didn't match root cid in header")
	}

	return nil
}

func (filec *FilecoinEC) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) {

	// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
	// to prevent spam attacks.
	// the logic works as follows: we lookup the miner in the chain for its key.
	// if we can find it then it's a known miner and we can validate the signature.
	// if we can't find it, we check whether we are (near) synced in the chain.
	// if we are not synced we cannot validate the block and we must ignore it.
	// if we are synced and the miner is unknown, then the block is rejcected.
	key, err := filec.checkPowerAndGetWorkerKey(ctx, b)
	if err != nil {
		if err != ErrSoftFailure && filec.isChainNearSynced() {
			log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
			return "unknown_miner", err
		}

		log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain: %s", b.Cid())
		return "", err // ignore
	}

	if b.ElectionProof.WinCount < 1 {
		log.Errorf("block is not claiming to be winning")
		return "not_winning", xerrors.Errorf("block not winning")
	}

	err = sigs.CheckBlockSignature(ctx, b, key)
	if err != nil {
		log.Errorf("block signature verification failed: %s", err)
		return "signature_verification_failed", err
	}

	return "", nil
}

func (filec *FilecoinEC) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
	// we check that the miner met the minimum power at the lookback tipset

	baseTs := filec.store.GetHeaviestTipSet()
	lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, bh.Height)
	if err != nil {
		log.Warnf("failed to load lookback tipset for incoming block: %s", err)
		return address.Undef, ErrSoftFailure
	}

	key, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bh.Miner)
	if err != nil {
		log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err)
		return address.Undef, ErrSoftFailure
	}

	// NOTE: we check to see if the miner was eligible in the lookback
	// tipset - 1 for historical reasons. DO NOT use the lookback state
	// returned by GetLookbackTipSetForRound.

	eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, bh.Miner, baseTs, lbts)
	if err != nil {
		log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
		return address.Undef, ErrSoftFailure
	}

	if !eligible {
		log.Warnf("incoming block's miner is ineligible")
		return address.Undef, ErrInsufficientPower
	}

	return key, nil
}

func (filec *FilecoinEC) isChainNearSynced() bool {
	ts := filec.store.GetHeaviestTipSet()
	timestamp := ts.MinTimestamp()
	timestampTime := time.Unix(int64(timestamp), 0)
	return build.Clock.Since(timestampTime) < 6*time.Hour
}

var _ consensus.Consensus = &FilecoinEC{}
@ -1,38 +1,36 @@
-package gen
+package filcns

 import (
 	"context"

-	"github.com/filecoin-project/go-state-types/crypto"
+	"github.com/ipfs/go-cid"
-	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
-	cid "github.com/ipfs/go-cid"
-	cbg "github.com/whyrusleeping/cbor-gen"
 	"golang.org/x/xerrors"

-	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/go-state-types/crypto"

 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/types"
 )

-func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
+func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
+	pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents)
-	pts, err := sm.ChainStore().LoadTipSet(bt.Parents)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
 	}

-	st, recpts, err := sm.TipSetState(ctx, pts)
+	st, recpts, err := filec.sm.TipSetState(ctx, pts)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to load tipset state: %w", err)
 	}

-	_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch)
+	_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, pts, bt.Epoch)
 	if err != nil {
 		return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
 	}

-	worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, lbst, bt.Miner)
+	worker, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bt.Miner)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to get miner worker: %w", err)
 	}
@ -61,14 +59,14 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
 			blsSigs = append(blsSigs, msg.Signature)
 			blsMessages = append(blsMessages, &msg.Message)

-			c, err := sm.ChainStore().PutMessage(&msg.Message)
+			c, err := filec.sm.ChainStore().PutMessage(&msg.Message)
 			if err != nil {
 				return nil, err
 			}

 			blsMsgCids = append(blsMsgCids, c)
 		} else {
-			c, err := sm.ChainStore().PutMessage(msg)
+			c, err := filec.sm.ChainStore().PutMessage(msg)
 			if err != nil {
 				return nil, err
 			}
@ -79,12 +77,12 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
 		}
 	}

-	store := sm.ChainStore().ActorStore(ctx)
+	store := filec.sm.ChainStore().ActorStore(ctx)
-	blsmsgroot, err := toArray(store, blsMsgCids)
+	blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids)
 	if err != nil {
 		return nil, xerrors.Errorf("building bls amt: %w", err)
 	}
-	secpkmsgroot, err := toArray(store, secpkMsgCids)
+	secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids)
 	if err != nil {
 		return nil, xerrors.Errorf("building secpk amt: %w", err)
 	}
@ -98,19 +96,19 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
 	}
 	next.Messages = mmcid

-	aggSig, err := aggregateSignatures(blsSigs)
+	aggSig, err := consensus.AggregateSignatures(blsSigs)
 	if err != nil {
 		return nil, err
 	}

 	next.BLSAggregate = aggSig
-	pweight, err := sm.ChainStore().Weight(ctx, pts)
+	pweight, err := filec.sm.ChainStore().Weight(ctx, pts)
 	if err != nil {
 		return nil, err
 	}
 	next.ParentWeight = pweight

-	baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts)
+	baseFee, err := filec.sm.ChainStore().ComputeBaseFee(ctx, pts)
 	if err != nil {
 		return nil, xerrors.Errorf("computing base fee: %w", err)
 	}
@ -138,41 +136,3 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,

 	return fullBlock, nil
 }
-
-func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
-	sigsS := make([]ffi.Signature, len(sigs))
-	for i := 0; i < len(sigs); i++ {
-		copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
-	}
-
-	aggSig := ffi.Aggregate(sigsS)
-	if aggSig == nil {
-		if len(sigs) > 0 {
-			return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
-		}
-
-		zeroSig := ffi.CreateZeroSignature()
-
-		// Note: for blst this condition should not happen - nil should not
-		// be returned
-		return &crypto.Signature{
-			Type: crypto.SigTypeBLS,
-			Data: zeroSig[:],
-		}, nil
-	}
-	return &crypto.Signature{
-		Type: crypto.SigTypeBLS,
-		Data: aggSig[:],
-	}, nil
-}
-
-func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
-	arr := blockadt.MakeEmptyArray(store)
-	for i, c := range cids {
-		oc := cbg.CborCid(c)
-		if err := arr.Set(uint64(i), &oc); err != nil {
-			return cid.Undef, err
-		}
-	}
-	return arr.Root()
-}
@ -1,4 +1,4 @@
-package stmgr
+package filcns

 import (
 	"context"
@ -13,6 +13,7 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/go-state-types/rt"

 	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
 	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
@ -31,15 +32,16 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
 	"github.com/filecoin-project/lotus/chain/state"
+	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
 )

-func DefaultUpgradeSchedule() UpgradeSchedule {
+func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
-	var us UpgradeSchedule
+	var us stmgr.UpgradeSchedule

-	updates := []Upgrade{{
+	updates := []stmgr.Upgrade{{
 		Height:    build.UpgradeBreezeHeight,
 		Network:   network.Version1,
 		Migration: UpgradeFaucetBurnRecovery,
@ -88,7 +90,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 		Height:    build.UpgradeTrustHeight,
 		Network:   network.Version10,
 		Migration: UpgradeActorsV3,
-		PreMigrations: []PreMigration{{
+		PreMigrations: []stmgr.PreMigration{{
 			PreMigration:    PreUpgradeActorsV3,
 			StartWithin:     120,
 			DontStartWithin: 60,
@ -108,7 +110,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 		Height:    build.UpgradeTurboHeight,
 		Network:   network.Version12,
 		Migration: UpgradeActorsV4,
-		PreMigrations: []PreMigration{{
+		PreMigrations: []stmgr.PreMigration{{
 			PreMigration:    PreUpgradeActorsV4,
 			StartWithin:     120,
 			DontStartWithin: 60,
@ -124,7 +126,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 		Height:    build.UpgradeHyperdriveHeight,
 		Network:   network.Version13,
 		Migration: UpgradeActorsV5,
-		PreMigrations: []PreMigration{{
+		PreMigrations: []stmgr.PreMigration{{
 			PreMigration:    PreUpgradeActorsV5,
 			StartWithin:     120,
 			DontStartWithin: 60,
@ -147,7 +149,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 	return us
 }

-func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, em stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 	// Some initial parameters
 	FundsForMiners := types.FromFil(1_000_000)
 	LookbackEpoch := abi.ChainEpoch(32000)
@ -249,7 +251,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio

 	// Execute transfers from previous step
 	for _, t := range transfers {
-		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
+		if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
 			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
 		}
 	}
@ -352,7 +354,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
 	}

 	for _, t := range transfersBack {
-		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
+		if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
 			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
 		}
 	}
@ -362,7 +364,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
 	}
-	if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
+	if err := stmgr.DoTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
 		return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
 	}

@ -378,7 +380,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
 	}

 	difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
-	if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
+	if err := stmgr.DoTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
 		return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
 	}

@ -400,14 +402,14 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
 	if em != nil {
 		// record the transfer in execution traces

-		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
+		fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))

 		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
-			MessageReceipt: *makeFakeRct(),
+			MessageReceipt: *stmgr.MakeFakeRct(),
 			ActorErr:       nil,
 			ExecutionTrace: types.ExecutionTrace{
 				Msg:        fakeMsg,
-				MsgRct:     makeFakeRct(),
+				MsgRct:     stmgr.MakeFakeRct(),
 				Error:      "",
 				Duration:   0,
 				GasCharges: nil,
@ -423,8 +425,8 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
 	return tree.Flush(ctx)
 }

-func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeIgnition(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	store := sm.cs.ActorStore(ctx)
+	store := sm.ChainStore().ActorStore(ctx)

 	if build.UpgradeLiftoffHeight <= epoch {
 		return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
@ -440,7 +442,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
 		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
 	}

-	err = setNetworkName(ctx, store, tree, "ignition")
+	err = stmgr.SetNetworkName(ctx, store, tree, "ignition")
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
 	}
@ -478,7 +480,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
 	return tree.Flush(ctx)
 }

-func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
+func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
 	if portions < 1 {
 		return xerrors.Errorf("cannot split into 0 portions")
 	}
@ -553,7 +555,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
 	}

 	for i < portions {
-		keyAddr, err := makeKeyAddr(addr, i)
+		keyAddr, err := stmgr.MakeKeyAddr(addr, i)
 		if err != nil {
 			return xerrors.Errorf("creating key address: %w", err)
 		}
@ -568,7 +570,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
 			return xerrors.Errorf("setting new msig actor state: %w", err)
 		}

-		if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
+		if err := stmgr.DoTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
 			return xerrors.Errorf("transferring split msig balance: %w", err)
 		}

@ -578,14 +580,14 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
 	if em != nil {
 		// record the transfer in execution traces

-		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
+		fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))

 		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
-			MessageReceipt: *makeFakeRct(),
+			MessageReceipt: *stmgr.MakeFakeRct(),
 			ActorErr:       nil,
 			ExecutionTrace: types.ExecutionTrace{
 				Msg:        fakeMsg,
-				MsgRct:     makeFakeRct(),
+				MsgRct:     stmgr.MakeFakeRct(),
 				Error:      "",
 				Duration:   0,
 				GasCharges: nil,
@ -602,8 +604,8 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
 }

 // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
-func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
+func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
-	gb, err := sm.cs.GetGenesis()
+	gb, err := sm.ChainStore().GetGenesis()
 	if err != nil {
 		return xerrors.Errorf("getting genesis block: %w", err)
 	}
@ -613,7 +615,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store,
 		return xerrors.Errorf("getting genesis tipset: %w", err)
 	}

-	cst := cbor.NewCborStore(sm.cs.StateBlockstore())
+	cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
 	genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
 	if err != nil {
 		return xerrors.Errorf("loading state tree: %w", err)
@ -683,9 +685,9 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St
 	return nil
 }

-func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeRefuel(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {

-	store := sm.cs.ActorStore(ctx)
+	store := sm.ChainStore().ActorStore(ctx)
 	tree, err := sm.StateTree(root)
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
@ -709,8 +711,8 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
 	return tree.Flush(ctx)
 }

-func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV2(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+	buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
 	store := store.ActorStore(ctx, buf)

 	info, err := store.Put(ctx, new(types.StateInfo0))
@ -755,13 +757,13 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb
 	return newRoot, nil
 }

-func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeLiftoff(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 	tree, err := sm.StateTree(root)
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
 	}

-	err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
+	err = stmgr.SetNetworkName(ctx, sm.ChainStore().ActorStore(ctx), tree, "mainnet")
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
 	}
@ -769,12 +771,12 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb
 	return tree.Flush(ctx)
 }

-func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeCalico(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 	if build.BuildType != build.BuildMainnet {
 		return root, nil
 	}

-	store := sm.cs.ActorStore(ctx)
+	store := sm.ChainStore().ActorStore(ctx)
 	var stateRoot types.StateRoot
 	if err := store.Get(ctx, root, &stateRoot); err != nil {
 		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
@ -815,7 +817,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
 	return newRoot, nil
 }

-func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 	// Use all the CPUs except 3.
 	workerCount := runtime.NumCPU() - 3
 	if workerCount <= 0 {
@ -839,7 +841,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
 	}

 	if build.BuildType == build.BuildMainnet {
-		err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
+		err := stmgr.TerminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
 		if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
 			return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
 		}
@ -853,7 +855,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
 	return newRoot, nil
 }

-func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+func PreUpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
 	// Use half the CPUs for pre-migration, but leave at least 3.
 	workerCount := runtime.NumCPU()
 	if workerCount <= 4 {
@ -867,11 +869,11 @@ func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func upgradeActorsV3Common(
|
func upgradeActorsV3Common(
|
||||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||||
config nv10.Config,
|
config nv10.Config,
|
||||||
) (cid.Cid, error) {
|
) (cid.Cid, error) {
|
||||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||||
store := store.ActorStore(ctx, buf)
|
store := store.ActorStore(ctx, buf)
|
||||||
|
|
||||||
// Load the state root.
|
// Load the state root.
|
||||||
@ -917,7 +919,7 @@ func upgradeActorsV3Common(
|
|||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
func UpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||||
// Use all the CPUs except 3.
|
// Use all the CPUs except 3.
|
||||||
workerCount := runtime.NumCPU() - 3
|
workerCount := runtime.NumCPU() - 3
|
||||||
if workerCount <= 0 {
|
if workerCount <= 0 {
|
||||||
@ -939,7 +941,7 @@ func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache
|
|||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
func PreUpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||||
workerCount := runtime.NumCPU()
|
workerCount := runtime.NumCPU()
|
||||||
if workerCount <= 4 {
|
if workerCount <= 4 {
|
||||||
@ -953,11 +955,11 @@ func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func upgradeActorsV4Common(
|
func upgradeActorsV4Common(
|
||||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||||
config nv12.Config,
|
config nv12.Config,
|
||||||
) (cid.Cid, error) {
|
) (cid.Cid, error) {
|
||||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||||
store := store.ActorStore(ctx, buf)
|
store := store.ActorStore(ctx, buf)
|
||||||
|
|
||||||
// Load the state root.
|
// Load the state root.
|
||||||
@ -1003,7 +1005,7 @@ func upgradeActorsV4Common(
|
|||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
func UpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||||
// Use all the CPUs except 3.
|
// Use all the CPUs except 3.
|
||||||
workerCount := runtime.NumCPU() - 3
|
workerCount := runtime.NumCPU() - 3
|
||||||
if workerCount <= 0 {
|
if workerCount <= 0 {
|
||||||
@ -1025,7 +1027,7 @@ func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache
|
|||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
func PreUpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||||
workerCount := runtime.NumCPU()
|
workerCount := runtime.NumCPU()
|
||||||
if workerCount <= 4 {
|
if workerCount <= 4 {
|
||||||
@ -1039,11 +1041,11 @@ func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func upgradeActorsV5Common(
|
func upgradeActorsV5Common(
|
||||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||||
config nv13.Config,
|
config nv13.Config,
|
||||||
) (cid.Cid, error) {
|
) (cid.Cid, error) {
|
||||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||||
store := store.ActorStore(ctx, buf)
|
store := store.ActorStore(ctx, buf)
|
||||||
|
|
||||||
// Load the state root.
|
// Load the state root.
|
||||||
@ -1088,3 +1090,18 @@ func upgradeActorsV5Common(
|
|||||||
|
|
||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type migrationLogger struct{}
|
||||||
|
|
||||||
|
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
|
||||||
|
switch level {
|
||||||
|
case rt.DEBUG:
|
||||||
|
log.Debugf(msg, args...)
|
||||||
|
case rt.INFO:
|
||||||
|
log.Infof(msg, args...)
|
||||||
|
case rt.WARN:
|
||||||
|
log.Warnf(msg, args...)
|
||||||
|
case rt.ERROR:
|
||||||
|
log.Errorf(msg, args...)
|
||||||
|
}
|
||||||
|
}
|
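The upgrade and pre-upgrade functions in the hunks above size their migration worker pools from the host CPU count ("all the CPUs except 3" for the real migration, "half the CPUs, but leave at least 3" for the pre-migration). The bodies of the clamping branches are cut off by the hunk context, so the fallback values below are assumptions; this is only a sketch of the sizing policy, not the committed code.

package main

import (
	"fmt"
	"runtime"
)

// migrationWorkers mirrors the "all CPUs except 3" policy used by the
// UpgradeActorsVn functions; the floor of 1 is an assumed fallback.
func migrationWorkers() int {
	workerCount := runtime.NumCPU() - 3
	if workerCount <= 0 {
		workerCount = 1 // assumed floor, not shown in the hunk
	}
	return workerCount
}

// preMigrationWorkers mirrors the "half the CPUs, but leave at least 3"
// policy used by the PreUpgradeActorsVn functions.
func preMigrationWorkers() int {
	workerCount := runtime.NumCPU()
	if workerCount <= 4 {
		workerCount = 1 // assumed floor for small machines
	} else {
		workerCount /= 2 // assumed halving, consistent with the comment
	}
	return workerCount
}

func main() {
	fmt.Println(migrationWorkers(), preMigrationWorkers())
}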
@@ -1,22 +1,25 @@
-package store
+package filcns
 
 import (
 	"context"
 	"math/big"
 
-	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
-
-	big2 "github.com/filecoin-project/go-state-types/big"
-	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/state"
-	"github.com/filecoin-project/lotus/chain/types"
 	cbor "github.com/ipfs/go-ipld-cbor"
 	"golang.org/x/xerrors"
+
+	big2 "github.com/filecoin-project/go-state-types/big"
+
+	bstore "github.com/filecoin-project/lotus/blockstore"
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
+	"github.com/filecoin-project/lotus/chain/state"
+	"github.com/filecoin-project/lotus/chain/store"
+	"github.com/filecoin-project/lotus/chain/types"
 )
 
 var zero = types.NewInt(0)
 
-func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
+func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
 	if ts == nil {
 		return types.NewInt(0), nil
 	}
@@ -28,7 +31,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
 
 	tpow := big2.Zero()
 	{
-		cst := cbor.NewCborStore(cs.StateBlockstore())
+		cst := cbor.NewCborStore(stateBs)
 		state, err := state.LoadStateTree(cst, ts.ParentState())
 		if err != nil {
 			return types.NewInt(0), xerrors.Errorf("load state tree: %w", err)
@@ -39,7 +42,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
 			return types.NewInt(0), xerrors.Errorf("get power actor: %w", err)
 		}
 
-		powState, err := power.Load(cs.ActorStore(ctx), act)
+		powState, err := power.Load(store.ActorStore(ctx, stateBs), act)
 		if err != nil {
 			return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err)
 		}
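The Weight change above turns a ChainStore method into a package-level function that only needs a state blockstore. A hedged sketch of the resulting call shape; the filcns and blockstore import paths are assumptions inferred from the package names in this diff, and the blockstore/tipset wiring is left out.

package main

import (
	"context"
	"fmt"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/types"
)

// tipsetWeight shows how a caller moves from cs.Weight(ctx, ts) to the new
// free function, passing only the state blockstore it actually needs.
func tipsetWeight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
	return filcns.Weight(ctx, stateBs, ts)
}

func main() {
	fmt.Println("see tipsetWeight for the call shape; wiring a real blockstore is out of scope here")
}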
chain/consensus/iface.go (new file, 19 lines)
@@ -0,0 +1,19 @@
+package consensus
+
+import (
+	"context"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+type Consensus interface {
+	ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)
+	ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string)
+	IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
+
+	CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error)
+}
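The new Consensus interface gathers block validation, pubsub validation, epoch bounds checking and block creation behind one type. A hedged sketch of a do-nothing stub that satisfies it, useful mainly as a compile-time check; everything other than the interface itself (the stub name, package, and return values) is hypothetical.

package consensusstub

import (
	"context"
	"errors"

	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/types"
)

type nullConsensus struct{}

// Compile-time assertion that the stub implements the new interface.
var _ consensus.Consensus = nullConsensus{}

func (nullConsensus) ValidateBlock(ctx context.Context, b *types.FullBlock) error { return nil }

func (nullConsensus) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
	return pubsub.ValidationAccept, ""
}

func (nullConsensus) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { return false }

func (nullConsensus) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
	return nil, errors.New("not implemented")
}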
chain/consensus/utils.go (new file, 83 lines)
@@ -0,0 +1,83 @@
+package consensus
+
+import (
+	"context"
+	"errors"
+
+	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
+	"github.com/ipfs/go-cid"
+	cbg "github.com/whyrusleeping/cbor-gen"
+	"go.opencensus.io/trace"
+	"golang.org/x/xerrors"
+
+	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/go-state-types/crypto"
+)
+
+var ErrTemporal = errors.New("temporal error")
+
+func VerifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
+	_, span := trace.StartSpan(ctx, "syncer.VerifyBlsAggregate")
+	defer span.End()
+	span.AddAttributes(
+		trace.Int64Attribute("msgCount", int64(len(msgs))),
+	)
+
+	msgsS := make([]ffi.Message, len(msgs))
+	pubksS := make([]ffi.PublicKey, len(msgs))
+	for i := 0; i < len(msgs); i++ {
+		msgsS[i] = msgs[i].Bytes()
+		copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
+	}
+
+	sigS := new(ffi.Signature)
+	copy(sigS[:], sig.Data[:ffi.SignatureBytes])
+
+	if len(msgs) == 0 {
+		return nil
+	}
+
+	valid := ffi.HashVerify(sigS, msgsS, pubksS)
+	if !valid {
+		return xerrors.New("bls aggregate signature failed to verify")
+	}
+	return nil
+}
+
+func AggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
+	sigsS := make([]ffi.Signature, len(sigs))
+	for i := 0; i < len(sigs); i++ {
+		copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
+	}
+
+	aggSig := ffi.Aggregate(sigsS)
+	if aggSig == nil {
+		if len(sigs) > 0 {
+			return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
+		}
+
+		zeroSig := ffi.CreateZeroSignature()
+
+		// Note: for blst this condition should not happen - nil should not
+		// be returned
+		return &crypto.Signature{
+			Type: crypto.SigTypeBLS,
+			Data: zeroSig[:],
+		}, nil
+	}
+	return &crypto.Signature{
+		Type: crypto.SigTypeBLS,
+		Data: aggSig[:],
+	}, nil
+}
+
+func ToMessagesArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
+	arr := blockadt.MakeEmptyArray(store)
+	for i, c := range cids {
+		oc := cbg.CborCid(c)
+		if err := arr.Set(uint64(i), &oc); err != nil {
+			return cid.Undef, err
+		}
+	}
+	return arr.Root()
+}
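utils.go exposes the BLS helpers used during block validation: aggregate a set of signatures and verify the aggregate against message CIDs and public keys. A hedged sketch of how the two helpers compose; the input data is fabricated and the chain/consensus import path is an assumption based on the file headers in this commit.

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/consensus"
)

// checkAggregate pairs AggregateSignatures with VerifyBlsAggregate, mirroring
// how a block's BLS messages would be checked against its aggregate signature.
func checkAggregate(ctx context.Context, sigs []crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
	agg, err := consensus.AggregateSignatures(sigs)
	if err != nil {
		return fmt.Errorf("aggregating: %w", err)
	}
	// Verification is a no-op for zero messages, matching the early return in
	// VerifyBlsAggregate above.
	return consensus.VerifyBlsAggregate(ctx, agg, msgs, pubks)
}

func main() {
	// With no messages the check trivially passes, per the len(msgs) == 0 branch.
	fmt.Println(checkAggregate(context.Background(), nil, nil, nil))
}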
chain/events/cache.go (new file, 33 lines)
@@ -0,0 +1,33 @@
+package events
+
+import (
+	"context"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/ipfs/go-cid"
+)
+
+type uncachedAPI interface {
+	ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
+	ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error)
+	StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
+
+	StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
+}
+
+type cache struct {
+	*tipSetCache
+	*messageCache
+	uncachedAPI
+}
+
+func newCache(api EventAPI, gcConfidence abi.ChainEpoch) *cache {
+	return &cache{
+		newTSCache(api, gcConfidence),
+		newMessageCache(api),
+		api,
+	}
+}
@@ -2,18 +2,14 @@ package events
 
 import (
 	"context"
-	"sync"
-	"time"
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/ipfs/go-cid"
 	logging "github.com/ipfs/go-log/v2"
-	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 )
 
@@ -25,209 +21,46 @@ type (
 	RevertHandler func(ctx context.Context, ts *types.TipSet) error
 )
 
-type heightHandler struct {
-	confidence int
-	called     bool
-
-	handle HeightHandler
-	revert RevertHandler
+// A TipSetObserver receives notifications of tipsets
+type TipSetObserver interface {
+	Apply(ctx context.Context, from, to *types.TipSet) error
+	Revert(ctx context.Context, from, to *types.TipSet) error
 }
 
 type EventAPI interface {
 	ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
 	ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
 	ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+	ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
 	ChainHead(context.Context) (*types.TipSet, error)
 	StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
 	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
+	ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error)
 
 	StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
 }
 
 type Events struct {
-	api EventAPI
-
-	tsc *tipSetCache
-	lk  sync.Mutex
-
-	ready     chan struct{}
-	readyOnce sync.Once
-
-	heightEvents
+	*observer
+	*heightEvents
 	*hcEvents
-
-	observers []TipSetObserver
 }
 
-func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) *Events {
-	tsc := newTSCache(gcConfidence, api)
-
-	e := &Events{
-		api: api,
-
-		tsc: tsc,
-
-		heightEvents: heightEvents{
-			tsc:          tsc,
-			ctx:          ctx,
-			gcConfidence: gcConfidence,
-
-			heightTriggers:   map[uint64]*heightHandler{},
-			htTriggerHeights: map[abi.ChainEpoch][]uint64{},
-			htHeights:        map[abi.ChainEpoch][]uint64{},
-		},
-
-		hcEvents:  newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
-		ready:     make(chan struct{}),
-		observers: []TipSetObserver{},
+func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
+	cache := newCache(api, gcConfidence)
+
+	ob := newObserver(cache, gcConfidence)
+	if err := ob.start(ctx); err != nil {
+		return nil, err
 	}
 
-	go e.listenHeadChanges(ctx)
-
-	// Wait for the first tipset to be seen or bail if shutting down
-	select {
-	case <-e.ready:
-	case <-ctx.Done():
-	}
-
-	return e
+	he := newHeightEvents(cache, ob, gcConfidence)
+	headChange := newHCEvents(cache, ob)
+
+	return &Events{ob, he, headChange}, nil
 }
 
-func NewEvents(ctx context.Context, api EventAPI) *Events {
+func NewEvents(ctx context.Context, api EventAPI) (*Events, error) {
 	gcConfidence := 2 * build.ForkLengthThreshold
 	return NewEventsWithConfidence(ctx, api, gcConfidence)
 }
-
-func (e *Events) listenHeadChanges(ctx context.Context) {
-	for {
-		if err := e.listenHeadChangesOnce(ctx); err != nil {
-			log.Errorf("listen head changes errored: %s", err)
-		} else {
-			log.Warn("listenHeadChanges quit")
-		}
-		select {
-		case <-build.Clock.After(time.Second):
-		case <-ctx.Done():
-			log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
-			return
-		}
-
-		log.Info("restarting listenHeadChanges")
-	}
-}
-
-func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	notifs, err := e.api.ChainNotify(ctx)
-	if err != nil {
-		// Retry is handled by caller
-		return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err)
-	}
-
-	var cur []*api.HeadChange
-	var ok bool
-
-	// Wait for first tipset or bail
-	select {
-	case cur, ok = <-notifs:
-		if !ok {
-			return xerrors.Errorf("notification channel closed")
-		}
-	case <-ctx.Done():
-		return ctx.Err()
-	}
-
-	if len(cur) != 1 {
-		return xerrors.Errorf("unexpected initial head notification length: %d", len(cur))
-	}
-
-	if cur[0].Type != store.HCCurrent {
-		return xerrors.Errorf("expected first head notification type to be 'current', was '%s'", cur[0].Type)
-	}
-
-	if err := e.tsc.add(cur[0].Val); err != nil {
-		log.Warnf("tsc.add: adding current tipset failed: %v", err)
-	}
-
-	e.readyOnce.Do(func() {
-		e.lastTs = cur[0].Val
-		// Signal that we have seen first tipset
-		close(e.ready)
-	})
-
-	for notif := range notifs {
-		var rev, app []*types.TipSet
-		for _, notif := range notif {
-			switch notif.Type {
-			case store.HCRevert:
-				rev = append(rev, notif.Val)
-			case store.HCApply:
-				app = append(app, notif.Val)
-			default:
-				log.Warnf("unexpected head change notification type: '%s'", notif.Type)
-			}
-		}
-
-		if err := e.headChange(ctx, rev, app); err != nil {
-			log.Warnf("headChange failed: %s", err)
-		}
-
-		// sync with fake chainstore (for tests)
-		if fcs, ok := e.api.(interface{ notifDone() }); ok {
-			fcs.notifDone()
-		}
-	}
-
-	return nil
-}
-
-func (e *Events) headChange(ctx context.Context, rev, app []*types.TipSet) error {
-	if len(app) == 0 {
-		return xerrors.New("events.headChange expected at least one applied tipset")
-	}
-
-	e.lk.Lock()
-	defer e.lk.Unlock()
-
-	if err := e.headChangeAt(rev, app); err != nil {
-		return err
-	}
-
-	if err := e.observeChanges(ctx, rev, app); err != nil {
-		return err
-	}
-	return e.processHeadChangeEvent(rev, app)
-}
-
-// A TipSetObserver receives notifications of tipsets
-type TipSetObserver interface {
-	Apply(ctx context.Context, ts *types.TipSet) error
-	Revert(ctx context.Context, ts *types.TipSet) error
-}
-
-// TODO: add a confidence level so we can have observers with difference levels of confidence
-func (e *Events) Observe(obs TipSetObserver) error {
-	e.lk.Lock()
-	defer e.lk.Unlock()
-	e.observers = append(e.observers, obs)
-	return nil
-}
-
-// observeChanges expects caller to hold e.lk
-func (e *Events) observeChanges(ctx context.Context, rev, app []*types.TipSet) error {
-	for _, ts := range rev {
-		for _, o := range e.observers {
-			_ = o.Revert(ctx, ts)
-		}
-	}
-
-	for _, ts := range app {
-		for _, o := range e.observers {
-			_ = o.Apply(ctx, ts)
-		}
-	}
-
-	return nil
-}
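After this hunk, Events is a thin composition of the observer, height events and head-change events, the constructors return an error instead of blocking, and TipSetObserver callbacks receive both the previous and the new tipset. A hedged sketch of a minimal observer against the new callback shape; the Observe registration on Events is an assumption, since this diff shows the internal obs.Observe call but not the exported surface.

package main

import (
	"context"
	"log"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

type logObserver struct{}

// Apply and Revert now receive the from/to pair rather than a single tipset.
func (logObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
	log.Printf("apply %d -> %d", from.Height(), to.Height())
	return nil
}

func (logObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
	log.Printf("revert %d -> %d", from.Height(), to.Height())
	return nil
}

var _ events.TipSetObserver = logObserver{}

func run(ctx context.Context, api events.EventAPI) error {
	ev, err := events.NewEvents(ctx, api) // now returns an error instead of blocking on the first head
	if err != nil {
		return err
	}
	_ = ev // registering logObserver{} is omitted; see the note above
	return nil
}

func main() {}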
@@ -5,9 +5,6 @@ import (
 	"math"
 	"sync"
 
-	"github.com/filecoin-project/lotus/api"
-	lru "github.com/hashicorp/golang-lru"
-
 	"github.com/filecoin-project/lotus/chain/stmgr"
 
 	"github.com/filecoin-project/go-state-types/abi"
@@ -35,7 +32,7 @@ type eventData interface{}
 // `prevTs` is the previous tipset, eg the "from" tipset for a state change.
 // `ts` is the event tipset, eg the tipset in which the `msg` is included.
 // `curH`-`ts.Height` = `confidence`
-type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
+type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
 
 // CheckFunc is used for atomicity guarantees. If the condition the callbacks
 // wait for has already happened in tipset `ts`
@@ -43,7 +40,7 @@ type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainE
 // If `done` is true, timeout won't be triggered
 // If `more` is false, no messages will be sent to EventHandler (RevertHandler
 // may still be called)
-type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error)
+type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error)
 
 // Keep track of information for an event handler
 type handlerInfo struct {
@@ -60,10 +57,9 @@ type handlerInfo struct {
 // until the required confidence is reached
 type queuedEvent struct {
 	trigger triggerID
+	data    eventData
 
-	prevH abi.ChainEpoch
-	h     abi.ChainEpoch
-	data  eventData
+	prevTipset, tipset *types.TipSet
 
 	called bool
 }
@@ -71,19 +67,17 @@ type queuedEvent struct {
 // Manages chain head change events, which may be forward (new tipset added to
 // chain) or backward (chain branch discarded in favour of heavier branch)
 type hcEvents struct {
 	cs EventAPI
-	tsc *tipSetCache
-	ctx context.Context
-	gcConfidence uint64
 
+	lk     sync.Mutex
 	lastTs *types.TipSet
 
-	lk sync.Mutex
-
 	ctr triggerID
 
+	// TODO: get rid of trigger IDs and just use pointers as keys.
 	triggers map[triggerID]*handlerInfo
 
+	// TODO: instead of scheduling events in the future, look at the chain in the past. We can skip the "confidence" queue entirely.
 	// maps block heights to events
 	// [triggerH][msgH][event]
 	confQueue map[triggerH]map[msgH][]*queuedEvent
@@ -98,83 +92,77 @@ type hcEvents struct {
 	watcherEvents
 }
 
-func newHCEvents(ctx context.Context, cs EventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
-	e := hcEvents{
-		ctx:          ctx,
-		cs:           cs,
-		tsc:          tsc,
-		gcConfidence: gcConfidence,
-
+func newHCEvents(api EventAPI, obs *observer) *hcEvents {
+	e := &hcEvents{
+		cs:          api,
 		confQueue:   map[triggerH]map[msgH][]*queuedEvent{},
 		revertQueue: map[msgH][]triggerH{},
 		triggers:    map[triggerID]*handlerInfo{},
 		timeouts:    map[abi.ChainEpoch]map[triggerID]int{},
 	}
 
-	e.messageEvents = newMessageEvents(ctx, &e, cs)
-	e.watcherEvents = newWatcherEvents(ctx, &e, cs)
+	e.messageEvents = newMessageEvents(e, api)
+	e.watcherEvents = newWatcherEvents(e, api)
 
-	return &e
+	// We need to take the lock as the observer could immediately try calling us.
+	e.lk.Lock()
+	e.lastTs = obs.Observe((*hcEventsObserver)(e))
+	e.lk.Unlock()
+
+	return e
 }
 
-// Called when there is a change to the head with tipsets to be
-// reverted / applied
-func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {
+type hcEventsObserver hcEvents
+
+func (e *hcEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
 	e.lk.Lock()
 	defer e.lk.Unlock()
 
-	for _, ts := range rev {
-		e.handleReverts(ts)
-		e.lastTs = ts
-	}
-
-	for _, ts := range app {
-		// Check if the head change caused any state changes that we were
-		// waiting for
-		stateChanges := e.watcherEvents.checkStateChanges(e.lastTs, ts)
-
-		// Queue up calls until there have been enough blocks to reach
-		// confidence on the state changes
-		for tid, data := range stateChanges {
-			e.queueForConfidence(tid, data, e.lastTs, ts)
-		}
-
-		// Check if the head change included any new message calls
-		newCalls, err := e.messageEvents.checkNewCalls(ts)
-		if err != nil {
-			return err
-		}
-
-		// Queue up calls until there have been enough blocks to reach
-		// confidence on the message calls
-		for tid, calls := range newCalls {
-			for _, data := range calls {
-				e.queueForConfidence(tid, data, nil, ts)
-			}
-		}
-
-		for at := e.lastTs.Height(); at <= ts.Height(); at++ {
-			// Apply any queued events and timeouts that were targeted at the
-			// current chain height
-			e.applyWithConfidence(ts, at)
-			e.applyTimeouts(ts)
-		}
-
-		// Update the latest known tipset
-		e.lastTs = ts
-	}
-
+	defer func() { e.lastTs = to }()
+
+	// Check if the head change caused any state changes that we were
+	// waiting for
+	stateChanges := e.checkStateChanges(from, to)
+
+	// Queue up calls until there have been enough blocks to reach
+	// confidence on the state changes
+	for tid, data := range stateChanges {
+		e.queueForConfidence(tid, data, from, to)
+	}
+
+	// Check if the head change included any new message calls
+	newCalls := e.checkNewCalls(ctx, from, to)
+
+	// Queue up calls until there have been enough blocks to reach
+	// confidence on the message calls
+	for tid, calls := range newCalls {
+		for _, data := range calls {
+			e.queueForConfidence(tid, data, nil, to)
		}
	}
+
+	for at := from.Height() + 1; at <= to.Height(); at++ {
+		// Apply any queued events and timeouts that were targeted at the
+		// current chain height
+		e.applyWithConfidence(ctx, at)
+		e.applyTimeouts(ctx, at, to)
+	}
 	return nil
 }
 
-func (e *hcEvents) handleReverts(ts *types.TipSet) {
-	reverts, ok := e.revertQueue[ts.Height()]
+func (e *hcEventsObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
+	e.lk.Lock()
+	defer e.lk.Unlock()
+
+	defer func() { e.lastTs = to }()
+
+	reverts, ok := e.revertQueue[from.Height()]
 	if !ok {
-		return // nothing to do
+		return nil // nothing to do
 	}
 
 	for _, triggerH := range reverts {
-		toRevert := e.confQueue[triggerH][ts.Height()]
+		toRevert := e.confQueue[triggerH][from.Height()]
 		for _, event := range toRevert {
 			if !event.called {
 				continue // event wasn't apply()-ied yet
@@ -182,24 +170,21 @@ func (e *hcEvents) handleReverts(ts *types.TipSet) {
 
 			trigger := e.triggers[event.trigger]
 
-			if err := trigger.revert(e.ctx, ts); err != nil {
-				log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", ts.Height(), triggerH, err)
+			if err := trigger.revert(ctx, from); err != nil {
+				log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", from.Height(), triggerH, err)
 			}
 		}
-		delete(e.confQueue[triggerH], ts.Height())
+		delete(e.confQueue[triggerH], from.Height())
 	}
-	delete(e.revertQueue, ts.Height())
+	delete(e.revertQueue, from.Height())
+	return nil
 }
 
 // Queue up events until the chain has reached a height that reflects the
 // desired confidence
-func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) {
+func (e *hcEventsObserver) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) {
 	trigger := e.triggers[trigID]
 
-	prevH := NoHeight
-	if prevTs != nil {
-		prevH = prevTs.Height()
-	}
 	appliedH := ts.Height()
 
 	triggerH := appliedH + abi.ChainEpoch(trigger.confidence)
@@ -211,28 +196,23 @@ func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts
 	}
 
 	byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{
 		trigger: trigID,
-		prevH:   prevH,
-		h:       appliedH,
-		data:    data,
+		data:       data,
+		tipset:     ts,
+		prevTipset: prevTs,
 	})
 
 	e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH)
 }
 
 // Apply any events that were waiting for this chain height for confidence
-func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) {
+func (e *hcEventsObserver) applyWithConfidence(ctx context.Context, height abi.ChainEpoch) {
 	byOrigH, ok := e.confQueue[height]
 	if !ok {
 		return // no triggers at this height
 	}
 
 	for origH, events := range byOrigH {
-		triggerTs, err := e.tsc.get(origH)
-		if err != nil {
-			log.Errorf("events: applyWithConfidence didn't find tipset for event; wanted %d; current %d", origH, height)
-		}
-
 		for _, event := range events {
 			if event.called {
 				continue
@@ -243,18 +223,7 @@ func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch)
 				continue
 			}
 
-			// Previous tipset - this is relevant for example in a state change
-			// from one tipset to another
-			var prevTs *types.TipSet
-			if event.prevH != NoHeight {
-				prevTs, err = e.tsc.get(event.prevH)
-				if err != nil {
-					log.Errorf("events: applyWithConfidence didn't find tipset for previous event; wanted %d; current %d", event.prevH, height)
-					continue
-				}
-			}
-
-			more, err := trigger.handle(event.data, prevTs, triggerTs, height)
+			more, err := trigger.handle(ctx, event.data, event.prevTipset, event.tipset, height)
 			if err != nil {
 				log.Errorf("chain trigger (@H %d, triggered @ %d) failed: %s", origH, height, err)
 				continue // don't revert failed calls
@@ -273,8 +242,8 @@ func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch)
 }
 
 // Apply any timeouts that expire at this height
-func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
-	triggers, ok := e.timeouts[ts.Height()]
+func (e *hcEventsObserver) applyTimeouts(ctx context.Context, at abi.ChainEpoch, ts *types.TipSet) {
+	triggers, ok := e.timeouts[at]
 	if !ok {
 		return // nothing to do
 	}
@@ -288,14 +257,15 @@ func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
 			continue
 		}
 
-		timeoutTs, err := e.tsc.get(ts.Height() - abi.ChainEpoch(trigger.confidence))
+		// This should be cached.
+		timeoutTs, err := e.cs.ChainGetTipSetAfterHeight(ctx, at-abi.ChainEpoch(trigger.confidence), ts.Key())
 		if err != nil {
-			log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", ts.Height()-abi.ChainEpoch(trigger.confidence), ts.Height())
+			log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", at-abi.ChainEpoch(trigger.confidence), at)
 		}
 
-		more, err := trigger.handle(nil, nil, timeoutTs, ts.Height())
+		more, err := trigger.handle(ctx, nil, nil, timeoutTs, at)
 		if err != nil {
-			log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTs.Height(), ts.Height(), err)
+			log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTs.Height(), at, err)
 			continue // don't revert failed calls
 		}
 
@@ -309,24 +279,19 @@ func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
 // - RevertHandler: called if the chain head changes causing the event to revert
 // - confidence: wait this many tipsets before calling EventHandler
 // - timeout: at this chain height, timeout on waiting for this event
-func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) {
+func (e *hcEvents) onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) {
 	e.lk.Lock()
 	defer e.lk.Unlock()
 
 	// Check if the event has already occurred
-	ts, err := e.tsc.best()
+	done, more, err := check(ctx, e.lastTs)
 	if err != nil {
-		return 0, xerrors.Errorf("error getting best tipset: %w", err)
-	}
-	done, more, err := check(ts)
-
-	if err != nil {
-		return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err)
+		return 0, xerrors.Errorf("called check error (h: %d): %w", e.lastTs.Height(), err)
 	}
 	if done {
 		timeout = NoTimeout
 	}
 
-	// Create a trigger for the event
 	id := e.ctr
 	e.ctr++
 
@@ -354,12 +319,11 @@ func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHa
 // headChangeAPI is used to allow the composed event APIs to call back to hcEvents
 // to listen for changes
 type headChangeAPI interface {
-	onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error)
+	onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error)
 }
 
 // watcherEvents watches for a state change
 type watcherEvents struct {
-	ctx   context.Context
 	cs    EventAPI
 	hcAPI headChangeAPI
 
@@ -367,9 +331,8 @@ type watcherEvents struct {
 	matchers map[triggerID]StateMatchFunc
 }
 
-func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) watcherEvents {
+func newWatcherEvents(hcAPI headChangeAPI, cs EventAPI) watcherEvents {
 	return watcherEvents{
-		ctx:      ctx,
 		cs:       cs,
 		hcAPI:    hcAPI,
 		matchers: make(map[triggerID]StateMatchFunc),
@@ -425,7 +388,7 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
 // * `StateChangeHandler` is called when the specified state change was observed
 // on-chain, and a confidence threshold was reached, or the specified `timeout`
 // height was reached with no state change observed. When this callback is
-// invoked on a timeout, `oldState` and `newState` are set to nil.
+// invoked on a timeout, `oldTs` and `states` are set to nil.
 // This callback returns a boolean specifying whether further notifications
 // should be sent, like `more` return param from `CheckFunc` above.
 //
@@ -438,7 +401,7 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
 // the state change is queued up until the confidence interval has elapsed (and
 // `StateChangeHandler` is called)
 func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
-	hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
+	hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
 		states, ok := data.(StateChange)
 		if data != nil && !ok {
 			panic("expected StateChange")
@@ -447,7 +410,7 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
 		return scHnd(prevTs, ts, states, height)
 	}
 
-	id, err := we.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
+	id, err := we.hcAPI.onHeadChanged(context.TODO(), check, hnd, rev, confidence, timeout)
 	if err != nil {
 		return err
 	}
@@ -461,43 +424,29 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
 
 // messageEvents watches for message calls to actors
 type messageEvents struct {
-	ctx   context.Context
 	cs    EventAPI
 	hcAPI headChangeAPI
 
 	lk       sync.RWMutex
 	matchers map[triggerID]MsgMatchFunc
-
-	blockMsgLk    sync.Mutex
-	blockMsgCache *lru.ARCCache
 }
 
-func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
-	blsMsgCache, _ := lru.NewARC(500)
+func newMessageEvents(hcAPI headChangeAPI, cs EventAPI) messageEvents {
 	return messageEvents{
-		ctx:      ctx,
 		cs:       cs,
 		hcAPI:    hcAPI,
 		matchers: make(map[triggerID]MsgMatchFunc),
-
-		blockMsgLk:    sync.Mutex{},
-		blockMsgCache: blsMsgCache,
 	}
 }
 
 // Check if there are any new actor calls
-func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) {
-	pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
-	if err != nil {
-		log.Errorf("getting parent tipset in checkNewCalls: %s", err)
-		return nil, err
-	}
-
+func (me *messageEvents) checkNewCalls(ctx context.Context, from, to *types.TipSet) map[triggerID][]eventData {
 	me.lk.RLock()
 	defer me.lk.RUnlock()
 
 	// For each message in the tipset
 	res := make(map[triggerID][]eventData)
-	me.messagesForTs(pts, func(msg *types.Message) {
+	me.messagesForTs(from, func(msg *types.Message) {
 		// TODO: provide receipts
 
 		// Run each trigger's matcher against the message
@@ -516,47 +465,32 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventD
 		}
 	})
 
-	return res, nil
+	return res
 }
 
 // Get the messages in a tipset
 func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) {
 	seen := map[cid.Cid]struct{}{}
 
-	for _, tsb := range ts.Blocks() {
-		me.blockMsgLk.Lock()
-		msgsI, ok := me.blockMsgCache.Get(tsb.Cid())
-		var err error
-		if !ok {
-			msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
-			if err != nil {
-				log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
-				// this is quite bad, but probably better than missing all the other updates
-				me.blockMsgLk.Unlock()
-				continue
-			}
-			me.blockMsgCache.Add(tsb.Cid(), msgsI)
-		}
-		me.blockMsgLk.Unlock()
-		msgs := msgsI.(*api.BlockMessages)
-		for _, m := range msgs.BlsMessages {
-			_, ok := seen[m.Cid()]
-			if ok {
-				continue
-			}
-			seen[m.Cid()] = struct{}{}
-
-			consume(m)
-		}
-
-		for _, m := range msgs.SecpkMessages {
-			_, ok := seen[m.Message.Cid()]
-			if ok {
-				continue
-			}
-			seen[m.Message.Cid()] = struct{}{}
-
-			consume(&m.Message)
-		}
+	for i, tsb := range ts.Cids() {
+		msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb)
+		if err != nil {
+			log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s",
+				ts.Height(), tsb, ts.Blocks()[i].Messages, err)
+			continue
+		}
+
+		for i, c := range msgs.Cids {
+			// We iterate over the CIDs to avoid having to recompute them.
+			_, ok := seen[c]
+			if ok {
+				continue
+			}
+			seen[c] = struct{}{}
+			if i < len(msgs.BlsMessages) {
+				consume(msgs.BlsMessages[i])
+			} else {
+				consume(&msgs.SecpkMessages[i-len(msgs.BlsMessages)].Message)
+			}
+		}
 	}
 }
@@ -596,14 +530,14 @@ type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
 // * `MsgMatchFunc` is called against each message. If there is a match, the
 // message is queued up until the confidence interval has elapsed (and
 // `MsgHandler` is called)
-func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
-	hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
+func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
+	hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
 		msg, ok := data.(*types.Message)
 		if data != nil && !ok {
 			panic("expected msg")
 		}
 
-		ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
+		ml, err := me.cs.StateSearchMsg(ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
 		if err != nil {
 			return false, err
 		}
@@ -615,7 +549,7 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
 		return msgHnd(msg, &ml.Receipt, ts, height)
 	}
 
-	id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
+	id, err := me.hcAPI.onHeadChanged(ctx, check, hnd, rev, confidence, timeout)
 	if err != nil {
 		return err
 	}
@@ -629,5 +563,5 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
 
 // Convenience function for checking and matching messages
 func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error {
-	return me.Called(me.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage()))
+	return me.Called(ctx, me.CheckMsg(msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage()))
 }
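CheckFunc, EventHandler and the Called entry point all gain an explicit context parameter in the hunks above, replacing the context that used to be stored on the event structs. A hedged sketch of the callback shapes a caller would now supply; the bodies are placeholders and only the signatures are taken from the diff.

package main

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/types"
)

// check mirrors the new CheckFunc signature: it now receives a context.
func check(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
	return false, true, nil
}

// handler mirrors the new EventHandler signature used inside Called's hnd
// wrapper: the context comes first, data/prevTs/ts/curH follow unchanged.
// eventData is unexported, so interface{} stands in for it here.
func handler(ctx context.Context, data interface{}, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) {
	return true, nil
}

// revert keeps the existing RevertHandler shape.
func revert(ctx context.Context, ts *types.TipSet) error { return nil }

func main() {
	_, _, _ = check, handler, revert
}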
@@ -11,199 +11,235 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )
 
-type heightEvents struct {
-	lk           sync.Mutex
-	tsc          *tipSetCache
-	gcConfidence abi.ChainEpoch
+type heightHandler struct {
+	ts     *types.TipSet
+	height abi.ChainEpoch
+	called bool
 
-	ctr triggerID
-
-	heightTriggers map[triggerID]*heightHandler
-
-	htTriggerHeights map[triggerH][]triggerID
-	htHeights        map[msgH][]triggerID
-
-	ctx context.Context
+	handle HeightHandler
+	revert RevertHandler
 }
 
-func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
-	ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
-	defer span.End()
-	span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
-	span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
-	span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
+type heightEvents struct {
+	api          EventAPI
+	gcConfidence abi.ChainEpoch
 
-	e.lk.Lock()
-	defer e.lk.Unlock()
-	for _, ts := range rev {
-		// TODO: log error if h below gcconfidence
-		// revert height-based triggers
+	lk                        sync.Mutex
+	head                      *types.TipSet
+	tsHeights, triggerHeights map[abi.ChainEpoch][]*heightHandler
+	lastGc                    abi.ChainEpoch //nolint:structcheck
+}
 
-		revert := func(h abi.ChainEpoch, ts *types.TipSet) {
-			for _, tid := range e.htHeights[h] {
-				ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
-
-				rev := e.heightTriggers[tid].revert
-				e.lk.Unlock()
-				err := rev(ctx, ts)
-				e.lk.Lock()
-				e.heightTriggers[tid].called = false
-
-				span.End()
-
-				if err != nil {
-					log.Errorf("reverting chain trigger (@H %d): %s", h, err)
-				}
-			}
-		}
-		revert(ts.Height(), ts)
-
-		subh := ts.Height() - 1
-		for {
-			cts, err := e.tsc.get(subh)
-			if err != nil {
-				return err
-			}
-
-			if cts != nil {
-				break
-			}
-
-			revert(subh, ts)
-			subh--
-		}
-
-		if err := e.tsc.revert(ts); err != nil {
-			return err
-		}
+func newHeightEvents(api EventAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents {
+	he := &heightEvents{
+		api:            api,
+		gcConfidence:   gcConfidence,
+		tsHeights:      map[abi.ChainEpoch][]*heightHandler{},
+		triggerHeights: map[abi.ChainEpoch][]*heightHandler{},
 	}
-
-	for i := range app {
-		ts := app[i]
-
-		if err := e.tsc.add(ts); err != nil {
-			return err
-		}
-
-		// height triggers
-
-		apply := func(h abi.ChainEpoch, ts *types.TipSet) error {
-			for _, tid := range e.htTriggerHeights[h] {
-				hnd := e.heightTriggers[tid]
-				if hnd.called {
-					return nil
-				}
-
-				triggerH := h - abi.ChainEpoch(hnd.confidence)
-
-				incTs, err := e.tsc.getNonNull(triggerH)
-				if err != nil {
-					return err
-				}
-
-				ctx, span := trace.StartSpan(ctx, "events.HeightApply")
-				span.AddAttributes(trace.BoolAttribute("immediate", false))
-				handle := hnd.handle
-				e.lk.Unlock()
-				err = handle(ctx, incTs, h)
-				e.lk.Lock()
-				hnd.called = true
-				span.End()
-
-				if err != nil {
+	he.lk.Lock()
+	he.head = obs.Observe((*heightEventsObserver)(he))
+	he.lk.Unlock()
+	return he
log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", triggerH, ts.Height(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := apply(ts.Height(), ts); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
subh := ts.Height() - 1
|
|
||||||
for {
|
|
||||||
cts, err := e.tsc.get(subh)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if cts != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := apply(subh, ts); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
subh--
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChainAt invokes the specified `HeightHandler` when the chain reaches the
|
// ChainAt invokes the specified `HeightHandler` when the chain reaches the
|
||||||
// specified height+confidence threshold. If the chain is rolled-back under the
|
// specified height+confidence threshold. If the chain is rolled-back under the
|
||||||
// specified height, `RevertHandler` will be called.
|
// specified height, `RevertHandler` will be called.
|
||||||
//
|
//
|
||||||
// ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null
|
// ts passed to handlers is the tipset at the specified epoch, or above if lower tipsets were null.
|
||||||
func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
|
//
|
||||||
e.lk.Lock() // Tricky locking, check your locks if you modify this function!
|
// The context governs cancellations of this call, it won't cancel the event handler.
|
||||||
|
func (e *heightEvents) ChainAt(ctx context.Context, hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
|
||||||
best, err := e.tsc.best()
|
if abi.ChainEpoch(confidence) > e.gcConfidence {
|
||||||
if err != nil {
|
// Need this to be able to GC effectively.
|
||||||
e.lk.Unlock()
|
return xerrors.Errorf("confidence cannot be greater than gcConfidence: %d > %d", confidence, e.gcConfidence)
|
||||||
return xerrors.Errorf("error getting best tipset: %w", err)
|
|
||||||
}
|
}
|
||||||
|
handler := &heightHandler{
|
||||||
bestH := best.Height()
|
height: h,
|
||||||
if bestH >= h+abi.ChainEpoch(confidence) {
|
|
||||||
ts, err := e.tsc.getNonNull(h)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("events.ChainAt: calling HandleFunc with nil tipset, not found in cache: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
e.lk.Unlock()
|
|
||||||
ctx, span := trace.StartSpan(e.ctx, "events.HeightApply")
|
|
||||||
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
|
||||||
|
|
||||||
err = hnd(ctx, ts, bestH)
|
|
||||||
span.End()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
e.lk.Lock()
|
|
||||||
best, err = e.tsc.best()
|
|
||||||
if err != nil {
|
|
||||||
e.lk.Unlock()
|
|
||||||
return xerrors.Errorf("error getting best tipset: %w", err)
|
|
||||||
}
|
|
||||||
bestH = best.Height()
|
|
||||||
}
|
|
||||||
|
|
||||||
defer e.lk.Unlock()
|
|
||||||
|
|
||||||
if bestH >= h+abi.ChainEpoch(confidence)+e.gcConfidence {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
triggerAt := h + abi.ChainEpoch(confidence)
|
|
||||||
|
|
||||||
id := e.ctr
|
|
||||||
e.ctr++
|
|
||||||
|
|
||||||
e.heightTriggers[id] = &heightHandler{
|
|
||||||
confidence: confidence,
|
|
||||||
|
|
||||||
handle: hnd,
|
handle: hnd,
|
||||||
revert: rev,
|
revert: rev,
|
||||||
}
|
}
|
||||||
|
triggerAt := h + abi.ChainEpoch(confidence)
|
||||||
|
|
||||||
e.htHeights[h] = append(e.htHeights[h], id)
|
// Here we try to jump onto a moving train. To avoid stopping the train, we release the lock
|
||||||
e.htTriggerHeights[triggerAt] = append(e.htTriggerHeights[triggerAt], id)
|
// while calling the API and/or the trigger functions. Unfortunately, it's entirely possible
|
||||||
|
// (although unlikely) to go back and forth across the trigger heights, so we need to keep
|
||||||
|
// going back and forth here till we're synced.
|
||||||
|
//
|
||||||
|
// TODO: Consider using a worker goroutine so we can just drop the handler in a channel? The
|
||||||
|
// downside is that we'd either need a tipset cache, or we'd need to potentially fetch
|
||||||
|
// tipsets in-line inside the event loop.
|
||||||
|
e.lk.Lock()
|
||||||
|
for {
|
||||||
|
head := e.head
|
||||||
|
if head.Height() >= h {
|
||||||
|
// Head is past the handler height. We at least need to stash the tipset to
|
||||||
|
// avoid doing this from the main event loop.
|
||||||
|
e.lk.Unlock()
|
||||||
|
|
||||||
|
var ts *types.TipSet
|
||||||
|
if head.Height() == h {
|
||||||
|
ts = head
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
ts, err = e.api.ChainGetTipSetAfterHeight(ctx, handler.height, head.Key())
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("events.ChainAt: failed to get tipset: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we've applied the handler on the wrong tipset, revert.
|
||||||
|
if handler.called && !ts.Equals(handler.ts) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||||
|
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||||
|
err := handler.revert(ctx, handler.ts)
|
||||||
|
span.End()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
handler.called = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the tipset.
|
||||||
|
handler.ts = ts
|
||||||
|
|
||||||
|
// If we've reached confidence and haven't called, call.
|
||||||
|
if !handler.called && head.Height() >= triggerAt {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
|
||||||
|
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||||
|
err := handler.handle(ctx, handler.ts, head.Height())
|
||||||
|
span.End()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
handler.called = true
|
||||||
|
|
||||||
|
// If we've reached gcConfidence, return without saving anything.
|
||||||
|
if head.Height() >= h+e.gcConfidence {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
e.lk.Lock()
|
||||||
|
} else if handler.called {
|
||||||
|
// We're not past the head (anymore) but have applied the handler. Revert, try again.
|
||||||
|
e.lk.Unlock()
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||||
|
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||||
|
err := handler.revert(ctx, handler.ts)
|
||||||
|
span.End()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
handler.called = false
|
||||||
|
e.lk.Lock()
|
||||||
|
} // otherwise, we changed heads but the change didn't matter.
|
||||||
|
|
||||||
|
// If we managed to get through this without the head changing, we're finally done.
|
||||||
|
if head.Equals(e.head) {
|
||||||
|
e.triggerHeights[triggerAt] = append(e.triggerHeights[triggerAt], handler)
|
||||||
|
e.tsHeights[h] = append(e.tsHeights[h], handler)
|
||||||
|
e.lk.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
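The rewritten ChainAt above now takes a caller context and registers a heightHandler that may be applied, reverted, and re-applied as the head moves. Below is a minimal, hypothetical in-package sketch of calling it; the HeightHandler and RevertHandler shapes are inferred from the handler.handle(ctx, ts, height) and handler.revert(ctx, ts) calls in this diff, and the helper name, target epoch, and confidence value are illustrative assumptions.

package events

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/types"
)

// scheduleAtExample is a hypothetical helper: it asks to be called once the
// chain is `confidence` epochs past `target`, and to be told if the tipset at
// `target` is later reverted.
func scheduleAtExample(ctx context.Context, e *heightEvents, target abi.ChainEpoch) error {
	const confidence = 5 // must not exceed e.gcConfidence, per the check above

	onHeight := func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
		// ts is the tipset at `target` (or the first non-null one after it);
		// curH is the head height at which the trigger fired.
		return nil
	}
	onRevert := func(ctx context.Context, ts *types.TipSet) error {
		// The chain was rolled back below `target` after onHeight ran.
		return nil
	}
	return e.ChainAt(ctx, onHeight, onRevert, confidence, target)
}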
||||||
|
// Updates the head and garbage collects if we're 2x over our garbage collection confidence period.
|
||||||
|
func (e *heightEventsObserver) updateHead(h *types.TipSet) {
|
||||||
|
e.lk.Lock()
|
||||||
|
defer e.lk.Unlock()
|
||||||
|
e.head = h
|
||||||
|
|
||||||
|
if e.head.Height() < e.lastGc+e.gcConfidence*2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.lastGc = h.Height()
|
||||||
|
|
||||||
|
targetGcHeight := e.head.Height() - e.gcConfidence
|
||||||
|
for h := range e.tsHeights {
|
||||||
|
if h >= targetGcHeight {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delete(e.tsHeights, h)
|
||||||
|
}
|
||||||
|
for h := range e.triggerHeights {
|
||||||
|
if h >= targetGcHeight {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delete(e.triggerHeights, h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type heightEventsObserver heightEvents
|
||||||
|
|
||||||
|
func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
|
||||||
|
// Update the head first so we don't accidentally skip reverting a concurrent call to ChainAt.
|
||||||
|
e.updateHead(to)
|
||||||
|
|
||||||
|
// Call revert on all heights between the two tipsets, handling empty tipsets.
|
||||||
|
for h := from.Height(); h > to.Height(); h-- {
|
||||||
|
e.lk.Lock()
|
||||||
|
triggers := e.tsHeights[h]
|
||||||
|
e.lk.Unlock()
|
||||||
|
|
||||||
|
// 1. Triggers are only invoked from the global event loop, so we don't need to hold the lock while calling.
|
||||||
|
// 2. We only ever append to or replace the trigger slice, so it's safe to iterate over it without the lock.
|
||||||
|
for _, handler := range triggers {
|
||||||
|
handler.ts = nil // invalidate
|
||||||
|
if !handler.called {
|
||||||
|
// We haven't triggered this yet, or there has been a concurrent call to ChainAt.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||||
|
err := handler.revert(ctx, from)
|
||||||
|
span.End()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("reverting chain trigger (@H %d): %s", h, err)
|
||||||
|
}
|
||||||
|
handler.called = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *heightEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
|
||||||
|
// Update the head first so we don't accidentally skip applying a concurrent call to ChainAt.
|
||||||
|
e.updateHead(to)
|
||||||
|
|
||||||
|
for h := from.Height() + 1; h <= to.Height(); h++ {
|
||||||
|
e.lk.Lock()
|
||||||
|
triggers := e.triggerHeights[h]
|
||||||
|
tipsets := e.tsHeights[h]
|
||||||
|
e.lk.Unlock()
|
||||||
|
|
||||||
|
// Stash the tipset for future triggers.
|
||||||
|
for _, handler := range tipsets {
|
||||||
|
handler.ts = to
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trigger the ready triggers.
|
||||||
|
for _, handler := range triggers {
|
||||||
|
if handler.called {
|
||||||
|
// We may have reverted past the trigger point, but not past the call point.
|
||||||
|
// Or there has been a concurrent call to ChainAt.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
|
||||||
|
span.AddAttributes(trace.BoolAttribute("immediate", false))
|
||||||
|
err := handler.handle(ctx, handler.ts, h)
|
||||||
|
span.End()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", h, to.Height(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
handler.called = true
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
42 chain/events/message_cache.go Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package events
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
lru "github.com/hashicorp/golang-lru"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
)
|
||||||
|
|
||||||
|
type messageCache struct {
|
||||||
|
api EventAPI
|
||||||
|
|
||||||
|
blockMsgLk sync.Mutex
|
||||||
|
blockMsgCache *lru.ARCCache
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMessageCache(api EventAPI) *messageCache {
|
||||||
|
blsMsgCache, _ := lru.NewARC(500)
|
||||||
|
|
||||||
|
return &messageCache{
|
||||||
|
api: api,
|
||||||
|
blockMsgCache: blsMsgCache,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid) (*api.BlockMessages, error) {
|
||||||
|
c.blockMsgLk.Lock()
|
||||||
|
defer c.blockMsgLk.Unlock()
|
||||||
|
|
||||||
|
msgsI, ok := c.blockMsgCache.Get(blkCid)
|
||||||
|
var err error
|
||||||
|
if !ok {
|
||||||
|
msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c.blockMsgCache.Add(blkCid, msgsI)
|
||||||
|
}
|
||||||
|
return msgsI.(*api.BlockMessages), nil
|
||||||
|
}
|
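The new message_cache.go above is a read-through memoization of ChainGetBlockMessages behind an ARC cache and a mutex. The self-contained sketch below shows the same pattern with generic stand-ins (string keys and a fake fetch function) so it can run outside lotus; only the hashicorp/golang-lru calls mirror the ones used above. ARC, as opposed to a plain LRU, weighs both recency and frequency, which suits repeated lookups of the same block CIDs.

package main

import (
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru"
)

// readThrough mirrors the pattern above: check the ARC cache under a lock,
// fall back to the expensive fetch on a miss, and remember the result.
// The fetch function and string key type are illustrative stand-ins.
type readThrough struct {
	mu    sync.Mutex
	cache *lru.ARCCache
	fetch func(key string) (string, error)
}

func newReadThrough(size int, fetch func(string) (string, error)) (*readThrough, error) {
	c, err := lru.NewARC(size)
	if err != nil {
		return nil, err
	}
	return &readThrough{cache: c, fetch: fetch}, nil
}

func (r *readThrough) Get(key string) (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if v, ok := r.cache.Get(key); ok {
		return v.(string), nil
	}
	v, err := r.fetch(key)
	if err != nil {
		return "", err
	}
	r.cache.Add(key, v)
	return v, nil
}

func main() {
	rt, _ := newReadThrough(500, func(key string) (string, error) {
		return "value-for-" + key, nil // pretend this is a slow API call
	})
	v, _ := rt.Get("bafy...")
	fmt.Println(v)
}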
255 chain/events/observer.go Normal file
@ -0,0 +1,255 @@
|
|||||||
|
package events
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type observer struct {
|
||||||
|
api EventAPI
|
||||||
|
|
||||||
|
gcConfidence abi.ChainEpoch
|
||||||
|
|
||||||
|
ready chan struct{}
|
||||||
|
|
||||||
|
lk sync.Mutex
|
||||||
|
head *types.TipSet
|
||||||
|
maxHeight abi.ChainEpoch
|
||||||
|
observers []TipSetObserver
|
||||||
|
}
|
||||||
|
|
||||||
|
func newObserver(api *cache, gcConfidence abi.ChainEpoch) *observer {
|
||||||
|
obs := &observer{
|
||||||
|
api: api,
|
||||||
|
gcConfidence: gcConfidence,
|
||||||
|
|
||||||
|
ready: make(chan struct{}),
|
||||||
|
observers: []TipSetObserver{},
|
||||||
|
}
|
||||||
|
obs.Observe(api.observer())
|
||||||
|
return obs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *observer) start(ctx context.Context) error {
|
||||||
|
go o.listenHeadChanges(ctx)
|
||||||
|
|
||||||
|
// Wait for the first tipset to be seen or bail if shutting down
|
||||||
|
select {
|
||||||
|
case <-o.ready:
|
||||||
|
return nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *observer) listenHeadChanges(ctx context.Context) {
|
||||||
|
for {
|
||||||
|
if err := o.listenHeadChangesOnce(ctx); err != nil {
|
||||||
|
log.Errorf("listen head changes errored: %s", err)
|
||||||
|
} else {
|
||||||
|
log.Warn("listenHeadChanges quit")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-build.Clock.After(time.Second):
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("restarting listenHeadChanges")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
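listenHeadChanges above restarts the ChainNotify subscription whenever it drops, pausing a second between attempts and bailing only when the context is done. A generic, runnable sketch of that retry shape follows; it uses the standard library timer instead of build.Clock, and the subscribe function is a placeholder.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runWithRetry mirrors the loop above: run the subscription, and on any exit
// wait a second (or bail if the context is done) before reconnecting.
func runWithRetry(ctx context.Context, subscribe func(context.Context) error) {
	for {
		if err := subscribe(ctx); err != nil {
			fmt.Println("subscription errored:", err)
		} else {
			fmt.Println("subscription quit")
		}
		select {
		case <-time.After(time.Second):
		case <-ctx.Done():
			fmt.Println("not restarting:", ctx.Err())
			return
		}
		fmt.Println("restarting subscription")
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runWithRetry(ctx, func(ctx context.Context) error {
		return errors.New("connection dropped") // simulate a transient failure
	})
}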
||||||
|
func (o *observer) listenHeadChangesOnce(ctx context.Context) error {
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
notifs, err := o.api.ChainNotify(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// Retry is handled by caller
|
||||||
|
return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var cur []*api.HeadChange
|
||||||
|
var ok bool
|
||||||
|
|
||||||
|
// Wait for first tipset or bail
|
||||||
|
select {
|
||||||
|
case cur, ok = <-notifs:
|
||||||
|
if !ok {
|
||||||
|
return xerrors.Errorf("notification channel closed")
|
||||||
|
}
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cur) != 1 {
|
||||||
|
return xerrors.Errorf("unexpected initial head notification length: %d", len(cur))
|
||||||
|
}
|
||||||
|
|
||||||
|
if cur[0].Type != store.HCCurrent {
|
||||||
|
return xerrors.Errorf("expected first head notification type to be 'current', was '%s'", cur[0].Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
curHead := cur[0].Val
|
||||||
|
|
||||||
|
o.lk.Lock()
|
||||||
|
if o.head == nil {
|
||||||
|
o.head = curHead
|
||||||
|
close(o.ready)
|
||||||
|
}
|
||||||
|
startHead := o.head
|
||||||
|
o.lk.Unlock()
|
||||||
|
|
||||||
|
if !startHead.Equals(curHead) {
|
||||||
|
changes, err := o.api.ChainGetPath(ctx, startHead.Key(), curHead.Key())
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get path from last applied tipset to head: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := o.applyChanges(ctx, changes); err != nil {
|
||||||
|
return xerrors.Errorf("failed catch-up head changes: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for changes := range notifs {
|
||||||
|
if err := o.applyChanges(ctx, changes); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *observer) applyChanges(ctx context.Context, changes []*api.HeadChange) error {
|
||||||
|
// Used to wait for a prior notification round to finish (by tests)
|
||||||
|
if len(changes) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rev, app []*types.TipSet
|
||||||
|
for _, changes := range changes {
|
||||||
|
switch changes.Type {
|
||||||
|
case store.HCRevert:
|
||||||
|
rev = append(rev, changes.Val)
|
||||||
|
case store.HCApply:
|
||||||
|
app = append(app, changes.Val)
|
||||||
|
default:
|
||||||
|
log.Errorf("unexpected head change notification type: '%s'", changes.Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := o.headChange(ctx, rev, app); err != nil {
|
||||||
|
return xerrors.Errorf("failed to apply head changes: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "events.HeadChange")
|
||||||
|
span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
|
||||||
|
span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
|
||||||
|
|
||||||
|
o.lk.Lock()
|
||||||
|
head := o.head
|
||||||
|
o.lk.Unlock()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height())))
|
||||||
|
span.End()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// NOTE: bailing out here if the head isn't what we expected is fine. We'll re-start the
|
||||||
|
// entire process and handle any strange reorgs.
|
||||||
|
for i, from := range rev {
|
||||||
|
if !from.Equals(head) {
|
||||||
|
return xerrors.Errorf(
|
||||||
|
"expected to revert %s (%d), reverting %s (%d)",
|
||||||
|
head.Key(), head.Height(), from.Key(), from.Height(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
var to *types.TipSet
|
||||||
|
if i+1 < len(rev) {
|
||||||
|
// If we have more reverts, the next revert is the next head.
|
||||||
|
to = rev[i+1]
|
||||||
|
} else {
|
||||||
|
// At the end of the revert sequence, we need to look up the joint tipset
|
||||||
|
// between the revert sequence and the apply sequence.
|
||||||
|
var err error
|
||||||
|
to, err = o.api.ChainGetTipSet(ctx, from.Parents())
|
||||||
|
if err != nil {
|
||||||
|
// Well, this sucks. We'll bail and restart.
|
||||||
|
return xerrors.Errorf("failed to get tipset when reverting due to a SetHeead: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the current observers and atomically set the head.
|
||||||
|
//
|
||||||
|
// 1. We need to get the observers every time in case some registered/deregistered.
|
||||||
|
// 2. We need to atomically set the head so new observers don't see events twice or
|
||||||
|
// skip them.
|
||||||
|
o.lk.Lock()
|
||||||
|
observers := o.observers
|
||||||
|
o.head = to
|
||||||
|
o.lk.Unlock()
|
||||||
|
|
||||||
|
for _, obs := range observers {
|
||||||
|
if err := obs.Revert(ctx, from, to); err != nil {
|
||||||
|
log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if to.Height() < o.maxHeight-o.gcConfidence {
|
||||||
|
log.Errorf("reverted past finality, from %d to %d", o.maxHeight, to.Height())
|
||||||
|
}
|
||||||
|
|
||||||
|
head = to
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, to := range app {
|
||||||
|
if to.Parents() != head.Key() {
|
||||||
|
return xerrors.Errorf(
|
||||||
|
"cannot apply %s (%d) with parents %s on top of %s (%d)",
|
||||||
|
to.Key(), to.Height(), to.Parents(), head.Key(), head.Height(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
o.lk.Lock()
|
||||||
|
observers := o.observers
|
||||||
|
o.head = to
|
||||||
|
o.lk.Unlock()
|
||||||
|
|
||||||
|
for _, obs := range observers {
|
||||||
|
if err := obs.Apply(ctx, head, to); err != nil {
|
||||||
|
log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if to.Height() > o.maxHeight {
|
||||||
|
o.maxHeight = to.Height()
|
||||||
|
}
|
||||||
|
|
||||||
|
head = to
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Observe registers the observer, and returns the current tipset. The observer is guaranteed to
|
||||||
|
// observe events starting at this tipset.
|
||||||
|
//
|
||||||
|
// Returns nil if the observer hasn't started yet (but still registers).
|
||||||
|
func (o *observer) Observe(obs TipSetObserver) *types.TipSet {
|
||||||
|
o.lk.Lock()
|
||||||
|
defer o.lk.Unlock()
|
||||||
|
o.observers = append(o.observers, obs)
|
||||||
|
return o.head
|
||||||
|
}
|
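Observe's contract above — register under the lock and hand back the head the observer is synchronized to — is what lets headChange publish to a snapshot of observers without any of them missing or double-seeing a tipset. The generic, runnable sketch below shows that locking discipline with integer "heads" standing in for tipsets; all names are illustrative.

package main

import (
	"fmt"
	"sync"
)

type watcher interface {
	apply(v int)
}

type broadcaster struct {
	mu       sync.Mutex
	current  int
	watchers []watcher
}

// observe registers w and returns the value it is synchronized to, under the
// same lock that publish uses to snapshot the watcher list.
func (b *broadcaster) observe(w watcher) int {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.watchers = append(b.watchers, w)
	return b.current
}

// publish snapshots the watcher list and advances the state atomically, then
// notifies the snapshot outside the lock; a watcher registered afterwards
// already saw the new value via observe and is not notified twice.
func (b *broadcaster) publish(v int) {
	b.mu.Lock()
	ws := b.watchers
	b.current = v
	b.mu.Unlock()
	for _, w := range ws {
		w.apply(v)
	}
}

type printer struct{}

func (printer) apply(v int) { fmt.Println("saw", v) }

func main() {
	b := &broadcaster{}
	start := b.observe(printer{})
	fmt.Println("synced at", start)
	b.publish(1)
	b.publish(2)
}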
@ -11,7 +11,9 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type tsCacheAPI interface {
|
type tsCacheAPI interface {
|
||||||
|
ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
||||||
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
||||||
|
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
|
||||||
ChainHead(context.Context) (*types.TipSet, error)
|
ChainHead(context.Context) (*types.TipSet, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -20,61 +22,157 @@ type tsCacheAPI interface {
|
|||||||
type tipSetCache struct {
|
type tipSetCache struct {
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
|
||||||
cache []*types.TipSet
|
byKey map[types.TipSetKey]*types.TipSet
|
||||||
start int
|
byHeight []*types.TipSet
|
||||||
len int
|
start int // chain head (end)
|
||||||
|
len int
|
||||||
|
|
||||||
storage tsCacheAPI
|
storage tsCacheAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTSCache(cap abi.ChainEpoch, storage tsCacheAPI) *tipSetCache {
|
func newTSCache(storage tsCacheAPI, cap abi.ChainEpoch) *tipSetCache {
|
||||||
return &tipSetCache{
|
return &tipSetCache{
|
||||||
cache: make([]*types.TipSet, cap),
|
byKey: make(map[types.TipSetKey]*types.TipSet, cap),
|
||||||
start: 0,
|
byHeight: make([]*types.TipSet, cap),
|
||||||
len: 0,
|
start: 0,
|
||||||
|
len: 0,
|
||||||
|
|
||||||
storage: storage,
|
storage: storage,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func (tsc *tipSetCache) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
if ts, ok := tsc.byKey[tsk]; ok {
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
return tsc.storage.ChainGetTipSet(ctx, tsk)
|
||||||
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) add(ts *types.TipSet) error {
|
func (tsc *tipSetCache) ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
return tsc.get(ctx, height, tsk, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tsc *tipSetCache) ChainGetTipSetAfterHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
return tsc.get(ctx, height, tsk, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tsc *tipSetCache) get(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey, prev bool) (*types.TipSet, error) {
|
||||||
|
fallback := tsc.storage.ChainGetTipSetAfterHeight
|
||||||
|
if prev {
|
||||||
|
fallback = tsc.storage.ChainGetTipSetByHeight
|
||||||
|
}
|
||||||
|
tsc.mu.RLock()
|
||||||
|
|
||||||
|
// Nothing in the cache?
|
||||||
|
if tsc.len == 0 {
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
|
||||||
|
return fallback(ctx, height, tsk)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve the head.
|
||||||
|
head := tsc.byHeight[tsc.start]
|
||||||
|
if !tsk.IsEmpty() {
|
||||||
|
// Not on this chain?
|
||||||
|
var ok bool
|
||||||
|
head, ok = tsc.byKey[tsk]
|
||||||
|
if !ok {
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
return fallback(ctx, height, tsk)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
headH := head.Height()
|
||||||
|
tailH := headH - abi.ChainEpoch(tsc.len)
|
||||||
|
|
||||||
|
if headH == height {
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
return head, nil
|
||||||
|
} else if headH < height {
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
// If the user doesn't pass a tsk, we assume "head" is the last tipset we processed.
|
||||||
|
return nil, xerrors.Errorf("requested epoch is in the future")
|
||||||
|
} else if height < tailH {
|
||||||
|
log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tailH)
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
return fallback(ctx, height, head.Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
direction := 1
|
||||||
|
if prev {
|
||||||
|
direction = -1
|
||||||
|
}
|
||||||
|
var ts *types.TipSet
|
||||||
|
for i := 0; i < tsc.len && ts == nil; i += direction {
|
||||||
|
ts = tsc.byHeight[normalModulo(tsc.start-int(headH-height)+i, len(tsc.byHeight))]
|
||||||
|
}
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tsc *tipSetCache) ChainHead(ctx context.Context) (*types.TipSet, error) {
|
||||||
|
tsc.mu.RLock()
|
||||||
|
best := tsc.byHeight[tsc.start]
|
||||||
|
tsc.mu.RUnlock()
|
||||||
|
if best == nil {
|
||||||
|
return tsc.storage.ChainHead(ctx)
|
||||||
|
}
|
||||||
|
return best, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tsc *tipSetCache) add(to *types.TipSet) error {
|
||||||
tsc.mu.Lock()
|
tsc.mu.Lock()
|
||||||
defer tsc.mu.Unlock()
|
defer tsc.mu.Unlock()
|
||||||
|
|
||||||
if tsc.len > 0 {
|
if tsc.len > 0 {
|
||||||
if tsc.cache[tsc.start].Height() >= ts.Height() {
|
best := tsc.byHeight[tsc.start]
|
||||||
return xerrors.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.cache[tsc.start].Height()+1, ts.Height())
|
if best.Height() >= to.Height() {
|
||||||
|
return xerrors.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.byHeight[tsc.start].Height()+1, to.Height())
|
||||||
|
}
|
||||||
|
if best.Key() != to.Parents() {
|
||||||
|
return xerrors.Errorf(
|
||||||
|
"tipSetCache.add: expected new tipset %s (%d) to follow %s (%d), its parents are %s",
|
||||||
|
to.Key(), to.Height(), best.Key(), best.Height(), best.Parents(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
nextH := ts.Height()
|
nextH := to.Height()
|
||||||
if tsc.len > 0 {
|
if tsc.len > 0 {
|
||||||
nextH = tsc.cache[tsc.start].Height() + 1
|
nextH = tsc.byHeight[tsc.start].Height() + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// fill null blocks
|
// fill null blocks
|
||||||
for nextH != ts.Height() {
|
for nextH != to.Height() {
|
||||||
tsc.start = normalModulo(tsc.start+1, len(tsc.cache))
|
tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight))
|
||||||
tsc.cache[tsc.start] = nil
|
was := tsc.byHeight[tsc.start]
|
||||||
if tsc.len < len(tsc.cache) {
|
if was != nil {
|
||||||
|
tsc.byHeight[tsc.start] = nil
|
||||||
|
delete(tsc.byKey, was.Key())
|
||||||
|
}
|
||||||
|
if tsc.len < len(tsc.byHeight) {
|
||||||
tsc.len++
|
tsc.len++
|
||||||
}
|
}
|
||||||
nextH++
|
nextH++
|
||||||
}
|
}
|
||||||
|
|
||||||
tsc.start = normalModulo(tsc.start+1, len(tsc.cache))
|
tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight))
|
||||||
tsc.cache[tsc.start] = ts
|
was := tsc.byHeight[tsc.start]
|
||||||
if tsc.len < len(tsc.cache) {
|
if was != nil {
|
||||||
|
delete(tsc.byKey, was.Key())
|
||||||
|
}
|
||||||
|
tsc.byHeight[tsc.start] = to
|
||||||
|
if tsc.len < len(tsc.byHeight) {
|
||||||
tsc.len++
|
tsc.len++
|
||||||
}
|
}
|
||||||
|
tsc.byKey[to.Key()] = to
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) revert(ts *types.TipSet) error {
|
func (tsc *tipSetCache) revert(from *types.TipSet) error {
|
||||||
tsc.mu.Lock()
|
tsc.mu.Lock()
|
||||||
defer tsc.mu.Unlock()
|
defer tsc.mu.Unlock()
|
||||||
|
|
||||||
return tsc.revertUnlocked(ts)
|
return tsc.revertUnlocked(from)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error {
|
func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error {
|
||||||
@ -82,75 +180,35 @@ func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error {
|
|||||||
return nil // this can happen, and it's fine
|
return nil // this can happen, and it's fine
|
||||||
}
|
}
|
||||||
|
|
||||||
if !tsc.cache[tsc.start].Equals(ts) {
|
was := tsc.byHeight[tsc.start]
|
||||||
|
|
||||||
|
if !was.Equals(ts) {
|
||||||
return xerrors.New("tipSetCache.revert: revert tipset didn't match cache head")
|
return xerrors.New("tipSetCache.revert: revert tipset didn't match cache head")
|
||||||
}
|
}
|
||||||
|
delete(tsc.byKey, was.Key())
|
||||||
|
|
||||||
tsc.cache[tsc.start] = nil
|
tsc.byHeight[tsc.start] = nil
|
||||||
tsc.start = normalModulo(tsc.start-1, len(tsc.cache))
|
tsc.start = normalModulo(tsc.start-1, len(tsc.byHeight))
|
||||||
tsc.len--
|
tsc.len--
|
||||||
|
|
||||||
_ = tsc.revertUnlocked(nil) // revert null block gap
|
_ = tsc.revertUnlocked(nil) // revert null block gap
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error) {
|
func (tsc *tipSetCache) observer() TipSetObserver {
|
||||||
for {
|
return (*tipSetCacheObserver)(tsc)
|
||||||
ts, err := tsc.get(height)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if ts != nil {
|
|
||||||
return ts, nil
|
|
||||||
}
|
|
||||||
height++
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {
|
type tipSetCacheObserver tipSetCache
|
||||||
tsc.mu.RLock()
|
|
||||||
|
|
||||||
if tsc.len == 0 {
|
var _ TipSetObserver = new(tipSetCacheObserver)
|
||||||
tsc.mu.RUnlock()
|
|
||||||
log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
|
|
||||||
return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, types.EmptyTSK)
|
|
||||||
}
|
|
||||||
|
|
||||||
headH := tsc.cache[tsc.start].Height()
|
func (tsc *tipSetCacheObserver) Apply(_ context.Context, _, to *types.TipSet) error {
|
||||||
|
return (*tipSetCache)(tsc).add(to)
|
||||||
if height > headH {
|
|
||||||
tsc.mu.RUnlock()
|
|
||||||
return nil, xerrors.Errorf("tipSetCache.get: requested tipset not in cache (req: %d, cache head: %d)", height, headH)
|
|
||||||
}
|
|
||||||
|
|
||||||
clen := len(tsc.cache)
|
|
||||||
var tail *types.TipSet
|
|
||||||
for i := 1; i <= tsc.len; i++ {
|
|
||||||
tail = tsc.cache[normalModulo(tsc.start-tsc.len+i, clen)]
|
|
||||||
if tail != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if height < tail.Height() {
|
|
||||||
tsc.mu.RUnlock()
|
|
||||||
log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height())
|
|
||||||
return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, tail.Key())
|
|
||||||
}
|
|
||||||
|
|
||||||
ts := tsc.cache[normalModulo(tsc.start-int(headH-height), clen)]
|
|
||||||
tsc.mu.RUnlock()
|
|
||||||
return ts, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tsc *tipSetCache) best() (*types.TipSet, error) {
|
func (tsc *tipSetCacheObserver) Revert(ctx context.Context, from, _ *types.TipSet) error {
|
||||||
tsc.mu.RLock()
|
return (*tipSetCache)(tsc).revert(from)
|
||||||
best := tsc.cache[tsc.start]
|
|
||||||
tsc.mu.RUnlock()
|
|
||||||
if best == nil {
|
|
||||||
return tsc.storage.ChainHead(context.TODO())
|
|
||||||
}
|
|
||||||
return best, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func normalModulo(n, m int) int {
|
func normalModulo(n, m int) int {
|
||||||
|
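tipSetCache indexes its fixed-size byHeight slice through normalModulo so that offsets computed relative to start — including negative ones when walking back from the head — always land inside the buffer. normalModulo's body falls outside this hunk, so the runnable sketch below uses its own wrapIndex helper as a stand-in to show the wrap-around arithmetic.

package main

import "fmt"

// wrapIndex is a stand-in for the unexported normalModulo helper referenced
// above (its body is not shown in this diff); it maps any offset, including
// negative ones, into [0, m).
func wrapIndex(n, m int) int {
	return ((n % m) + m) % m
}

func main() {
	const size = 8
	ring := make([]string, size)
	start := 2 // most recently written slot, like tipSetCache.start

	// Walk backwards five slots from the head, wrapping around the buffer.
	for back := 0; back < 5; back++ {
		idx := wrapIndex(start-back, size)
		ring[idx] = fmt.Sprintf("height-%d", 100-back)
	}
	fmt.Println(ring)
}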
@ -6,61 +6,22 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-state-types/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTsCache(t *testing.T) {
|
|
||||||
tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})
|
|
||||||
|
|
||||||
h := abi.ChainEpoch(75)
|
|
||||||
|
|
||||||
a, _ := address.NewFromString("t00")
|
|
||||||
|
|
||||||
add := func() {
|
|
||||||
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
|
||||||
Miner: a,
|
|
||||||
Height: h,
|
|
||||||
ParentStateRoot: dummyCid,
|
|
||||||
Messages: dummyCid,
|
|
||||||
ParentMessageReceipts: dummyCid,
|
|
||||||
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
|
|
||||||
BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
|
|
||||||
}})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := tsc.add(ts); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
h++
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 9000; i++ {
|
|
||||||
if i%90 > 60 {
|
|
||||||
best, err := tsc.best()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err, "; i:", i)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := tsc.revert(best); err != nil {
|
|
||||||
t.Fatal(err, "; i:", i)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h--
|
|
||||||
} else {
|
|
||||||
add()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
type tsCacheAPIFailOnStorageCall struct {
|
type tsCacheAPIFailOnStorageCall struct {
|
||||||
t *testing.T
|
t *testing.T
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetAfterHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
tc.t.Fatal("storage call")
|
||||||
|
return &types.TipSet{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
||||||
tc.t.Fatal("storage call")
|
tc.t.Fatal("storage call")
|
||||||
return &types.TipSet{}, nil
|
return &types.TipSet{}, nil
|
||||||
@ -69,100 +30,181 @@ func (tc *tsCacheAPIFailOnStorageCall) ChainHead(ctx context.Context) (*types.Ti
|
|||||||
tc.t.Fatal("storage call")
|
tc.t.Fatal("storage call")
|
||||||
return &types.TipSet{}, nil
|
return &types.TipSet{}, nil
|
||||||
}
|
}
|
||||||
|
func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
tc.t.Fatal("storage call")
|
||||||
|
return &types.TipSet{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type cacheHarness struct {
|
||||||
|
t *testing.T
|
||||||
|
|
||||||
|
miner address.Address
|
||||||
|
tsc *tipSetCache
|
||||||
|
height abi.ChainEpoch
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCacheharness(t *testing.T) *cacheHarness {
|
||||||
|
a, err := address.NewFromString("t00")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
h := &cacheHarness{
|
||||||
|
t: t,
|
||||||
|
tsc: newTSCache(&tsCacheAPIFailOnStorageCall{t: t}, 50),
|
||||||
|
height: 75,
|
||||||
|
miner: a,
|
||||||
|
}
|
||||||
|
h.addWithParents(nil)
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cacheHarness) addWithParents(parents []cid.Cid) {
|
||||||
|
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
||||||
|
Miner: h.miner,
|
||||||
|
Height: h.height,
|
||||||
|
ParentStateRoot: dummyCid,
|
||||||
|
Messages: dummyCid,
|
||||||
|
ParentMessageReceipts: dummyCid,
|
||||||
|
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
|
||||||
|
BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
|
||||||
|
Parents: parents,
|
||||||
|
}})
|
||||||
|
require.NoError(h.t, err)
|
||||||
|
require.NoError(h.t, h.tsc.add(ts))
|
||||||
|
h.height++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cacheHarness) add() {
|
||||||
|
last, err := h.tsc.ChainHead(context.Background())
|
||||||
|
require.NoError(h.t, err)
|
||||||
|
h.addWithParents(last.Cids())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cacheHarness) revert() {
|
||||||
|
best, err := h.tsc.ChainHead(context.Background())
|
||||||
|
require.NoError(h.t, err)
|
||||||
|
err = h.tsc.revert(best)
|
||||||
|
require.NoError(h.t, err)
|
||||||
|
h.height--
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cacheHarness) skip(n abi.ChainEpoch) {
|
||||||
|
h.height += n
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTsCache(t *testing.T) {
|
||||||
|
h := newCacheharness(t)
|
||||||
|
|
||||||
|
for i := 0; i < 9000; i++ {
|
||||||
|
if i%90 > 60 {
|
||||||
|
h.revert()
|
||||||
|
} else {
|
||||||
|
h.add()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestTsCacheNulls(t *testing.T) {
|
func TestTsCacheNulls(t *testing.T) {
|
||||||
tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})
|
ctx := context.Background()
|
||||||
|
h := newCacheharness(t)
|
||||||
|
|
||||||
h := abi.ChainEpoch(75)
|
h.add()
|
||||||
|
h.add()
|
||||||
|
h.add()
|
||||||
|
h.skip(5)
|
||||||
|
|
||||||
a, _ := address.NewFromString("t00")
|
h.add()
|
||||||
add := func() {
|
h.add()
|
||||||
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
|
||||||
Miner: a,
|
|
||||||
Height: h,
|
|
||||||
ParentStateRoot: dummyCid,
|
|
||||||
Messages: dummyCid,
|
|
||||||
ParentMessageReceipts: dummyCid,
|
|
||||||
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
|
|
||||||
BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
|
|
||||||
}})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := tsc.add(ts); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
h++
|
|
||||||
}
|
|
||||||
|
|
||||||
add()
|
best, err := h.tsc.ChainHead(ctx)
|
||||||
add()
|
|
||||||
add()
|
|
||||||
h += 5
|
|
||||||
|
|
||||||
add()
|
|
||||||
add()
|
|
||||||
|
|
||||||
best, err := tsc.best()
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-1, best.Height())
|
require.Equal(t, h.height-1, best.Height())
|
||||||
|
|
||||||
ts, err := tsc.get(h - 1)
|
ts, err := h.tsc.ChainGetTipSetByHeight(ctx, h.height-1, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-1, ts.Height())
|
require.Equal(t, h.height-1, ts.Height())
|
||||||
|
|
||||||
ts, err = tsc.get(h - 2)
|
ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-2, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-2, ts.Height())
|
require.Equal(t, h.height-2, ts.Height())
|
||||||
|
|
||||||
ts, err = tsc.get(h - 3)
|
// Should skip the nulls and walk back to the last tipset.
|
||||||
|
ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-3, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Nil(t, ts)
|
require.Equal(t, h.height-8, ts.Height())
|
||||||
|
|
||||||
ts, err = tsc.get(h - 8)
|
ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-8, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-8, ts.Height())
|
require.Equal(t, h.height-8, ts.Height())
|
||||||
|
|
||||||
best, err = tsc.best()
|
best, err = h.tsc.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, tsc.revert(best))
|
require.NoError(t, h.tsc.revert(best))
|
||||||
|
|
||||||
best, err = tsc.best()
|
best, err = h.tsc.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, tsc.revert(best))
|
require.NoError(t, h.tsc.revert(best))
|
||||||
|
|
||||||
best, err = tsc.best()
|
best, err = h.tsc.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-8, best.Height())
|
require.Equal(t, h.height-8, best.Height())
|
||||||
|
|
||||||
h += 50
|
h.skip(50)
|
||||||
add()
|
h.add()
|
||||||
|
|
||||||
ts, err = tsc.get(h - 1)
|
ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-1, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, h-1, ts.Height())
|
require.Equal(t, h.height-1, ts.Height())
|
||||||
}
|
}
|
||||||
|
|
||||||
type tsCacheAPIStorageCallCounter struct {
|
type tsCacheAPIStorageCallCounter struct {
|
||||||
t *testing.T
|
t *testing.T
|
||||||
chainGetTipSetByHeight int
|
chainGetTipSetByHeight int
|
||||||
chainHead int
|
chainGetTipSetAfterHeight int
|
||||||
|
chainGetTipSet int
|
||||||
|
chainHead int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
||||||
tc.chainGetTipSetByHeight++
|
tc.chainGetTipSetByHeight++
|
||||||
return &types.TipSet{}, nil
|
return &types.TipSet{}, nil
|
||||||
}
|
}
|
||||||
|
func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetAfterHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
tc.chainGetTipSetAfterHeight++
|
||||||
|
return &types.TipSet{}, nil
|
||||||
|
}
|
||||||
func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) {
|
func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) {
|
||||||
tc.chainHead++
|
tc.chainHead++
|
||||||
return &types.TipSet{}, nil
|
return &types.TipSet{}, nil
|
||||||
}
|
}
|
||||||
|
func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||||
|
tc.chainGetTipSet++
|
||||||
|
return &types.TipSet{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func TestTsCacheEmpty(t *testing.T) {
|
func TestTsCacheEmpty(t *testing.T) {
|
||||||
// Calling best on an empty cache should just call out to the chain API
|
// Calling ChainHead on an empty cache should just call out to the chain API
|
||||||
callCounter := &tsCacheAPIStorageCallCounter{t: t}
|
callCounter := &tsCacheAPIStorageCallCounter{t: t}
|
||||||
tsc := newTSCache(50, callCounter)
|
tsc := newTSCache(callCounter, 50)
|
||||||
_, err := tsc.best()
|
_, err := tsc.ChainHead(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, callCounter.chainHead)
|
require.Equal(t, 1, callCounter.chainHead)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTsCacheSkip(t *testing.T) {
|
||||||
|
h := newCacheharness(t)
|
||||||
|
|
||||||
|
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
||||||
|
Miner: h.miner,
|
||||||
|
Height: h.height,
|
||||||
|
ParentStateRoot: dummyCid,
|
||||||
|
Messages: dummyCid,
|
||||||
|
ParentMessageReceipts: dummyCid,
|
||||||
|
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
|
||||||
|
BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
|
||||||
|
// With parents that don't match the last block.
|
||||||
|
Parents: nil,
|
||||||
|
}})
|
||||||
|
require.NoError(h.t, err)
|
||||||
|
err = h.tsc.add(ts)
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
@ -10,10 +10,10 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd MsgHandler) CheckFunc {
|
func (me *messageEvents) CheckMsg(smsg types.ChainMsg, hnd MsgHandler) CheckFunc {
|
||||||
msg := smsg.VMMessage()
|
msg := smsg.VMMessage()
|
||||||
|
|
||||||
return func(ts *types.TipSet) (done bool, more bool, err error) {
|
return func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
|
||||||
fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key())
|
fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, true, err
|
return false, true, err
|
||||||
@ -24,7 +24,7 @@ func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd
|
|||||||
return false, true, nil
|
return false, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
|
ml, err := me.cs.StateSearchMsg(ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err)
|
return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -151,12 +151,20 @@ func (c *client) doRequest(
|
|||||||
// errors. Peer penalization should happen here then, before returning, so
|
// errors. Peer penalization should happen here then, before returning, so
|
||||||
// we can apply the correct penalties depending on the cause of the error.
|
// we can apply the correct penalties depending on the cause of the error.
|
||||||
// FIXME: Add the `peer` as argument once we implement penalties.
|
// FIXME: Add the `peer` as argument once we implement penalties.
|
||||||
func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (*validatedResponse, error) {
|
func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (r *validatedResponse, err error) {
|
||||||
err := res.statusToError()
|
err = res.statusToError()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("status error: %s", err)
|
return nil, xerrors.Errorf("status error: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if rerr := recover(); rerr != nil {
|
||||||
|
log.Errorf("process response error: %s", rerr)
|
||||||
|
err = xerrors.Errorf("process response error: %s", rerr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
options := parseOptions(req.Options)
|
options := parseOptions(req.Options)
|
||||||
if options.noOptionsSet() {
|
if options.noOptionsSet() {
|
||||||
// Safety check: this shouldn't have been sent, and even if it did
|
// Safety check: this shouldn't have been sent, and even if it did
|
||||||
|
@ -23,7 +23,6 @@ import (
|
|||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/ipfs/go-merkledag"
|
"github.com/ipfs/go-merkledag"
|
||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
"go.opencensus.io/trace"
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
|
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
|
||||||
@ -33,6 +32,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||||
"github.com/filecoin-project/lotus/chain/beacon"
|
"github.com/filecoin-project/lotus/chain/beacon"
|
||||||
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
|
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
@ -43,7 +43,6 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
"github.com/filecoin-project/lotus/genesis"
|
"github.com/filecoin-project/lotus/genesis"
|
||||||
"github.com/filecoin-project/lotus/journal"
|
"github.com/filecoin-project/lotus/journal"
|
||||||
"github.com/filecoin-project/lotus/lib/sigs"
|
|
||||||
"github.com/filecoin-project/lotus/node/repo"
|
"github.com/filecoin-project/lotus/node/repo"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -233,7 +232,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
|
|||||||
return nil, xerrors.Errorf("make genesis block failed: %w", err)
|
return nil, xerrors.Errorf("make genesis block failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cs := store.NewChainStore(bs, bs, ds, j)
|
cs := store.NewChainStore(bs, bs, ds, filcns.Weight, j)
|
||||||
|
|
||||||
genfb := &types.FullBlock{Header: genb.Genesis}
|
genfb := &types.FullBlock{Header: genb.Genesis}
|
||||||
gents := store.NewFullTipSet([]*types.FullBlock{genfb})
|
gents := store.NewFullTipSet([]*types.FullBlock{genfb})
|
||||||
@ -247,7 +246,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
|
|||||||
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
|
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
|
||||||
}
|
}
|
||||||
|
|
||||||
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, sys, us)
|
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, us)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("initing stmgr: %w", err)
|
return nil, xerrors.Errorf("initing stmgr: %w", err)
|
||||||
}
|
}
|
||||||
@ -289,7 +288,7 @@ func NewGenerator() (*ChainGen, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
|
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
|
||||||
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule())
|
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, filcns.DefaultUpgradeSchedule())
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
|
func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
|
||||||
@ -487,7 +486,7 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
|
|||||||
ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs
|
ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs
|
||||||
}
|
}
|
||||||
|
|
||||||
fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{
|
fblk, err := filcns.NewFilecoinExpectedConsensus(cg.sm, nil, nil, nil).CreateBlock(context.TODO(), cg.w, &api.BlockTemplate{
|
||||||
Miner: m,
|
Miner: m,
|
||||||
Parents: parents.Key(),
|
Parents: parents.Key(),
|
||||||
Ticket: vrfticket,
|
Ticket: vrfticket,
|
||||||
@ -667,22 +666,6 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
|
|||||||
|
|
||||||
type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error)
|
type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error)
|
||||||
|
|
||||||
func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error {
|
|
||||||
_, span := trace.StartSpan(ctx, "VerifyVRF")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
sig := &crypto.Signature{
|
|
||||||
Type: crypto.SigTypeBLS,
|
|
||||||
Data: vrfproof,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sigs.Verify(sig, worker, vrfBase); err != nil {
|
|
||||||
return xerrors.Errorf("vrf was invalid: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ComputeVRF(ctx context.Context, sign SignFunc, worker address.Address, sigInput []byte) ([]byte, error) {
|
func ComputeVRF(ctx context.Context, sign SignFunc, worker address.Address, sigInput []byte) ([]byte, error) {
|
||||||
sig, err := sign(ctx, worker, sigInput)
|
sig, err := sign(ctx, worker, sigInput)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
 	verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
 	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -222,7 +223,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
 		return nil, nil, xerrors.Errorf("set verified registry actor: %w", err)
 	}
 
-	bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
+	bact, err := MakeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
 	if err != nil {
 		return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err)
 	}
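With makeAccountActor exported as MakeAccountActor (and the create* helpers likewise below), code outside this file can build genesis account actors directly. A hypothetical caller sketch; the variables are illustrative, not taken from this diff:

// Assumes cst is a cbor.IpldStore, av the target actors.Version and st a
// *state.StateTree, as in MakeInitialStateTree above.
act, err := MakeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
if err != nil {
	return xerrors.Errorf("making burnt funds actor: %w", err)
}
if err := st.SetActor(builtin.BurntFundsActorAddr, act); err != nil {
	return xerrors.Errorf("setting burnt funds actor: %w", err)
}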
@ -235,7 +236,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
|
|
||||||
switch info.Type {
|
switch info.Type {
|
||||||
case genesis.TAccount:
|
case genesis.TAccount:
|
||||||
if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil {
|
if err := CreateAccountActor(ctx, cst, state, info, keyIDs, av); err != nil {
|
||||||
return nil, nil, xerrors.Errorf("failed to create account actor: %w", err)
|
return nil, nil, xerrors.Errorf("failed to create account actor: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -247,7 +248,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
}
|
}
|
||||||
idStart++
|
idStart++
|
||||||
|
|
||||||
if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil {
|
if err := CreateMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
@ -268,7 +269,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner)
|
return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner)
|
||||||
}
|
}
|
||||||
|
|
||||||
vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance)
|
vact, err := MakeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err)
|
return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err)
|
||||||
}
|
}
|
||||||
@ -276,7 +277,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err)
|
return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err)
|
||||||
}
|
}
|
||||||
case genesis.TMultisig:
|
case genesis.TMultisig:
|
||||||
if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil {
|
if err = CreateMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil {
|
||||||
return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
|
return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
@ -305,7 +306,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero())
|
verifierAct, err := MakeAccountActor(ctx, cst, av, verifierAd, big.Zero())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, xerrors.Errorf("setup first verifier state: %w", err)
|
return nil, nil, xerrors.Errorf("setup first verifier state: %w", err)
|
||||||
}
|
}
|
||||||
@ -348,13 +349,13 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
}
|
}
|
||||||
|
|
||||||
keyIDs[ainfo.Owner] = builtin.ReserveAddress
|
keyIDs[ainfo.Owner] = builtin.ReserveAddress
|
||||||
err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av)
|
err = CreateAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, xerrors.Errorf("creating remainder acct: %w", err)
|
return nil, nil, xerrors.Errorf("creating remainder acct: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
case genesis.TMultisig:
|
case genesis.TMultisig:
|
||||||
if err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil {
|
if err = CreateMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil {
|
||||||
return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err)
|
return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
@ -364,7 +365,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return state, keyIDs, nil
|
return state, keyIDs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) {
|
func MakeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) {
|
||||||
ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr)
|
ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -389,13 +390,13 @@ func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version
|
|||||||
return act, nil
|
return act, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
|
func CreateAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
|
||||||
var ainfo genesis.AccountMeta
|
var ainfo genesis.AccountMeta
|
||||||
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
|
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
|
||||||
return xerrors.Errorf("unmarshaling account meta: %w", err)
|
return xerrors.Errorf("unmarshaling account meta: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance)
|
aa, err := MakeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -412,9 +413,9 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
|
func CreateMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
|
||||||
if info.Type != genesis.TMultisig {
|
if info.Type != genesis.TMultisig {
|
||||||
return fmt.Errorf("can only call createMultisigAccount with multisig Actor info")
|
return fmt.Errorf("can only call CreateMultisigAccount with multisig Actor info")
|
||||||
}
|
}
|
||||||
var ainfo genesis.MultisigMeta
|
var ainfo genesis.MultisigMeta
|
||||||
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
|
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
|
||||||
@ -436,7 +437,7 @@ func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
aa, err := makeAccountActor(ctx, cst, av, e, big.Zero())
|
aa, err := MakeAccountActor(ctx, cst, av, e, big.Zero())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -483,6 +484,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
 		Epoch:          0,
 		Rand:           &fakeRand{},
 		Bstore:         cs.StateBlockstore(),
+		Actors:         filcns.NewActorRegistry(),
 		Syscalls:       mkFakedSigSyscalls(sys),
 		CircSupplyCalc: nil,
 		NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
@@ -562,7 +564,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
 	}
 
 	// temp chainstore
-	cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), j)
+	cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, j)
 
 	// Verify PreSealed Data
 	stateroot, err = VerifyPreSealedData(ctx, cs, sys, stateroot, template, keyIDs, template.NetworkVersion)
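Genesis-time VMs are now told explicitly which actor registry to use; the hunks above add an Actors field alongside the existing options. A partial sketch of the option struct as used here (other fields elided, values copied from the hunk):

// Not a complete vm.VMOpts; only the fields visible in this hunk are shown.
vmopt := &vm.VMOpts{
	Epoch:          0,
	Rand:           &fakeRand{},
	Bstore:         cs.StateBlockstore(),
	Actors:         filcns.NewActorRegistry(), // newly required registry
	Syscalls:       mkFakedSigSyscalls(sys),
	CircSupplyCalc: nil,
	// NtwkVersion, BaseFee, ... as before
}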
@@ -37,6 +37,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
 	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/state"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -87,6 +88,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
 		Epoch:          0,
 		Rand:           &fakeRand{},
 		Bstore:         cs.StateBlockstore(),
+		Actors:         filcns.NewActorRegistry(),
 		Syscalls:       mkFakedSigSyscalls(sys),
 		CircSupplyCalc: csc,
 		NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
@@ -354,7 +354,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
 	return set
 }
 
-func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
+func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
 	cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
 	verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
 
@@ -366,7 +366,6 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
 	if j == nil {
 		j = journal.NilJournal()
 	}
-	us := stmgr.DefaultUpgradeSchedule()
 
 	mp := &MessagePool{
 		ds: ds,
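Because the pool no longer defaults to stmgr.DefaultUpgradeSchedule(), every caller of New must now pass a schedule explicitly. A minimal sketch; the provider, datastore, network-name and journal values are placeholders:

// Sketch: construct a message pool with the Filecoin consensus upgrade schedule.
mp, err := messagepool.New(provider, ds, filcns.DefaultUpgradeSchedule(), netName, journal.NilJournal())
if err != nil {
	return nil, xerrors.Errorf("constructing mpool: %w", err)
}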
@@ -11,17 +11,18 @@ import (
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
 	logging "github.com/ipfs/go-log/v2"
+	"github.com/stretchr/testify/assert"
 
 	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/mock"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	_ "github.com/filecoin-project/lotus/lib/sigs/bls"
 	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
-	"github.com/stretchr/testify/assert"
 )
 
 func init() {
@@ -232,7 +233,7 @@ func TestMessagePool(t *testing.T) {
 
 	ds := datastore.NewMapDatastore()
 
-	mp, err := New(tma, ds, "mptest", nil)
+	mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@ -276,7 +277,7 @@ func TestCheckMessageBig(t *testing.T) {
|
|||||||
|
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
to := mock.Address(1001)
|
to := mock.Address(1001)
|
||||||
@ -339,7 +340,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
|
|||||||
|
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -388,7 +389,7 @@ func TestRevertMessages(t *testing.T) {
|
|||||||
|
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -451,7 +452,7 @@ func TestPruningSimple(t *testing.T) {
|
|||||||
|
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -495,7 +496,7 @@ func TestLoadLocal(t *testing.T) {
|
|||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -538,7 +539,7 @@ func TestLoadLocal(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
mp, err = New(tma, ds, "mptest", nil)
|
mp, err = New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -567,7 +568,7 @@ func TestClearAll(t *testing.T) {
|
|||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -621,7 +622,7 @@ func TestClearNonLocal(t *testing.T) {
|
|||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -682,7 +683,7 @@ func TestUpdates(t *testing.T) {
|
|||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
mp, err := New(tma, ds, "mptest", nil)
|
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -9,6 +9,7 @@ import (
 
 	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
 
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
@@ -24,7 +25,7 @@ func TestRepubMessages(t *testing.T) {
 	tma := newTestMpoolAPI()
 	ds := datastore.NewMapDatastore()
 
-	mp, err := New(tma, ds, "mptest", nil)
+	mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -20,8 +20,6 @@ import (
 
 var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
 
-var MaxBlockMessages = 16000
-
 const MaxBlocks = 15
 
 type msgChain struct {
@@ -58,8 +56,8 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq
 		return nil, err
 	}
 
-	if len(msgs) > MaxBlockMessages {
-		msgs = msgs[:MaxBlockMessages]
+	if len(msgs) > build.BlockMessageLimit {
+		msgs = msgs[:build.BlockMessageLimit]
 	}
 
 	return msgs, nil
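SelectMessages now clamps its result to the shared build.BlockMessageLimit constant rather than a package-local variable. A hypothetical caller sketch; the ticket-quality argument is assumed to be a float in [0, 1]:

// Sketch: select messages for a block; the pool guarantees the result is
// already truncated to build.BlockMessageLimit.
msgs, err := mp.SelectMessages(ctx, ts, 1.0)
if err != nil {
	return nil, err
}
// len(msgs) <= build.BlockMessageLimit holds here.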
@@ -21,6 +21,7 @@ import (
 	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
 
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/mock"
@@ -60,7 +61,7 @@ func makeTestMessage(w *wallet.LocalWallet, from, to address.Address, nonce uint
 func makeTestMpool() (*MessagePool, *testMpoolAPI) {
 	tma := newTestMpoolAPI()
 	ds := datastore.NewMapDatastore()
-	mp, err := New(tma, ds, "test", nil)
+	mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil)
 	if err != nil {
 		panic(err)
 	}
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/ipfs/go-cid"
 	"go.opencensus.io/trace"
@@ -20,41 +21,52 @@ import (
 
 var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at epoch")
 
+// Call applies the given message to the given tipset's parent state, at the epoch following the
+// tipset's parent. In the presence of null blocks, the height at which the message is invoked may
+// be less than the specified tipset.
+//
+// - If no tipset is specified, the first tipset without an expensive migration is used.
+// - If executing a message at a given tipset would trigger an expensive migration, the call will
+//   fail with ErrExpensiveFork.
 func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) {
 	ctx, span := trace.StartSpan(ctx, "statemanager.Call")
 	defer span.End()
 
+	var pheight abi.ChainEpoch = -1
+
 	// If no tipset is provided, try to find one without a fork.
 	if ts == nil {
 		ts = sm.cs.GetHeaviestTipSet()
 
 		// Search back till we find a height with no fork, or we reach the beginning.
-		for ts.Height() > 0 && sm.hasExpensiveFork(ctx, ts.Height()-1) {
-			var err error
-			ts, err = sm.cs.GetTipSetFromKey(ts.Parents())
+		for ts.Height() > 0 {
+			pts, err := sm.cs.GetTipSetFromKey(ts.Parents())
 			if err != nil {
 				return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
 			}
+			if !sm.hasExpensiveFork(pts.Height()) {
+				pheight = pts.Height()
+				break
+			}
+			ts = pts
 		}
+	} else if ts.Height() > 0 {
+		pts, err := sm.cs.LoadTipSet(ts.Parents())
+		if err != nil {
+			return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
+		}
+		pheight = pts.Height()
+		if sm.hasExpensiveFork(pheight) {
+			return nil, ErrExpensiveFork
+		}
+	} else {
+		// We can't get the parent tipset in this case.
+		pheight = ts.Height() - 1
 	}
 
 	bstate := ts.ParentState()
-	pts, err := sm.cs.LoadTipSet(ts.Parents())
-	if err != nil {
-		return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
-	}
-	pheight := pts.Height()
-
-	// If we have to run an expensive migration, and we're not at genesis,
-	// return an error because the migration will take too long.
-	//
-	// We allow this at height 0 for at-genesis migrations (for testing).
-	if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) {
-		return nil, ErrExpensiveFork
-	}
-
 	// Run the (not expensive) migration.
-	bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts)
+	bstate, err := sm.HandleStateForks(ctx, bstate, pheight, nil, ts)
 	if err != nil {
 		return nil, fmt.Errorf("failed to handle fork: %w", err)
 	}
@@ -64,7 +76,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
 		Epoch:          pheight + 1,
 		Rand:           store.NewChainRand(sm.cs, ts.Cids()),
 		Bstore:         sm.cs.StateBlockstore(),
-		Syscalls:       sm.syscalls,
+		Actors:         sm.tsExec.NewActorRegistry(),
+		Syscalls:       sm.Syscalls,
 		CircSupplyCalc: sm.GetVMCirculatingSupply,
 		NtwkVersion:    sm.GetNtwkVersion,
 		BaseFee:        types.NewInt(0),
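The new doc comment and fork-walking logic above change how callers should treat Call: with a nil tipset it walks back past expensive migrations itself, while an explicit tipset can be rejected. A usage sketch (error handling only; the stmgr package qualifier is assumed):

// Read-only call against the current head; Call picks a safe epoch itself.
ret, err := sm.Call(ctx, msg, nil)
if err != nil {
	if errors.Is(err, stmgr.ErrExpensiveFork) {
		// Only possible when an explicit tipset was passed; retry with nil
		// or with an older tipset.
	}
	return err
}
fmt.Println(ret.MsgRct.ExitCode)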
@@ -140,18 +153,25 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 		// run the fork logic in `sm.TipSetState`. We need the _current_
 		// height to have no fork, because we'll run it inside this
 		// function before executing the given message.
-		for ts.Height() > 0 && (sm.hasExpensiveFork(ctx, ts.Height()) || sm.hasExpensiveFork(ctx, ts.Height()-1)) {
-			var err error
-			ts, err = sm.cs.GetTipSetFromKey(ts.Parents())
+		for ts.Height() > 0 {
+			pts, err := sm.cs.GetTipSetFromKey(ts.Parents())
 			if err != nil {
 				return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
 			}
-		}
-	}
+			if !sm.hasExpensiveForkBetween(pts.Height(), ts.Height()+1) {
+				break
+			}
 
-	// When we're not at the genesis block, make sure we don't have an expensive migration.
-	if ts.Height() > 0 && (sm.hasExpensiveFork(ctx, ts.Height()) || sm.hasExpensiveFork(ctx, ts.Height()-1)) {
-		return nil, ErrExpensiveFork
+			ts = pts
+		}
+	} else if ts.Height() > 0 {
+		pts, err := sm.cs.GetTipSetFromKey(ts.Parents())
+		if err != nil {
+			return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
+		}
+		if sm.hasExpensiveForkBetween(pts.Height(), ts.Height()+1) {
+			return nil, ErrExpensiveFork
+		}
 	}
 
 	state, _, err := sm.TipSetState(ctx, ts)
@@ -159,7 +179,8 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 		return nil, xerrors.Errorf("computing tipset state: %w", err)
 	}
 
-	state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
+	// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
+	state, err = sm.HandleStateForks(ctx, state, ts.Height(), nil, ts)
 	if err != nil {
 		return nil, fmt.Errorf("failed to handle fork: %w", err)
 	}
@@ -179,7 +200,8 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 		Epoch:          ts.Height() + 1,
 		Rand:           r,
 		Bstore:         sm.cs.StateBlockstore(),
-		Syscalls:       sm.syscalls,
+		Actors:         sm.tsExec.NewActorRegistry(),
+		Syscalls:       sm.Syscalls,
 		CircSupplyCalc: sm.GetVMCirculatingSupply,
 		NtwkVersion:    sm.GetNtwkVersion,
 		BaseFee:        ts.Blocks()[0].ParentBaseFee,
@@ -252,7 +274,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C
 	// message to find
 	finder.mcid = mcid
 
-	_, _, err := sm.computeTipSetState(ctx, ts, &finder)
+	_, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, &finder)
 	if err != nil && !xerrors.Is(err, errHaltExecution) {
 		return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
 	}
|
@ -3,218 +3,14 @@ package stmgr
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
|
||||||
"go.opencensus.io/stats"
|
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
"golang.org/x/xerrors"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
|
||||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
|
||||||
"github.com/filecoin-project/lotus/metrics"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
|
|
||||||
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
|
|
||||||
defer done()
|
|
||||||
|
|
||||||
partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
|
|
||||||
defer func() {
|
|
||||||
partDone()
|
|
||||||
}()
|
|
||||||
|
|
||||||
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
|
|
||||||
vmopt := &vm.VMOpts{
|
|
||||||
StateBase: base,
|
|
||||||
Epoch: epoch,
|
|
||||||
Rand: r,
|
|
||||||
Bstore: sm.cs.StateBlockstore(),
|
|
||||||
Syscalls: sm.syscalls,
|
|
||||||
CircSupplyCalc: sm.GetVMCirculatingSupply,
|
|
||||||
NtwkVersion: sm.GetNtwkVersion,
|
|
||||||
BaseFee: baseFee,
|
|
||||||
LookbackState: LookbackStateGetterForTipset(sm, ts),
|
|
||||||
}
|
|
||||||
|
|
||||||
return sm.newVM(ctx, vmopt)
|
|
||||||
}
|
|
||||||
|
|
||||||
vmi, err := makeVmWithBaseState(pstate)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
runCron := func(epoch abi.ChainEpoch) error {
|
|
||||||
cronMsg := &types.Message{
|
|
||||||
To: cron.Address,
|
|
||||||
From: builtin.SystemActorAddr,
|
|
||||||
Nonce: uint64(epoch),
|
|
||||||
Value: types.NewInt(0),
|
|
||||||
GasFeeCap: types.NewInt(0),
|
|
||||||
GasPremium: types.NewInt(0),
|
|
||||||
GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little
|
|
||||||
Method: cron.Methods.EpochTick,
|
|
||||||
Params: nil,
|
|
||||||
}
|
|
||||||
ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if em != nil {
|
|
||||||
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
|
|
||||||
return xerrors.Errorf("callback failed on cron message: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ret.ExitCode != 0 {
|
|
||||||
return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := parentEpoch; i < epoch; i++ {
|
|
||||||
if i > parentEpoch {
|
|
||||||
// run cron for null rounds if any
|
|
||||||
if err := runCron(i); err != nil {
|
|
||||||
return cid.Undef, cid.Undef, err
|
|
||||||
}
|
|
||||||
|
|
||||||
pstate, err = vmi.Flush(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handle state forks
|
|
||||||
// XXX: The state tree
|
|
||||||
newState, err := sm.handleStateForks(ctx, pstate, i, em, ts)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if pstate != newState {
|
|
||||||
vmi, err = makeVmWithBaseState(newState)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
vmi.SetBlockHeight(i + 1)
|
|
||||||
pstate = newState
|
|
||||||
}
|
|
||||||
|
|
||||||
partDone()
|
|
||||||
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
|
|
||||||
|
|
||||||
var receipts []cbg.CBORMarshaler
|
|
||||||
processedMsgs := make(map[cid.Cid]struct{})
|
|
||||||
for _, b := range bms {
|
|
||||||
penalty := types.NewInt(0)
|
|
||||||
gasReward := big.Zero()
|
|
||||||
|
|
||||||
for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
|
|
||||||
m := cm.VMMessage()
|
|
||||||
if _, found := processedMsgs[m.Cid()]; found {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r, err := vmi.ApplyMessage(ctx, cm)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, err
|
|
||||||
}
|
|
||||||
|
|
||||||
receipts = append(receipts, &r.MessageReceipt)
|
|
||||||
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
|
|
||||||
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
|
|
||||||
|
|
||||||
if em != nil {
|
|
||||||
if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
|
|
||||||
return cid.Undef, cid.Undef, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
processedMsgs[m.Cid()] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
|
|
||||||
Miner: b.Miner,
|
|
||||||
Penalty: penalty,
|
|
||||||
GasReward: gasReward,
|
|
||||||
WinCount: b.WinCount,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rwMsg := &types.Message{
|
|
||||||
From: builtin.SystemActorAddr,
|
|
||||||
To: reward.Address,
|
|
||||||
Nonce: uint64(epoch),
|
|
||||||
Value: types.NewInt(0),
|
|
||||||
GasFeeCap: types.NewInt(0),
|
|
||||||
GasPremium: types.NewInt(0),
|
|
||||||
GasLimit: 1 << 30,
|
|
||||||
Method: reward.Methods.AwardBlockReward,
|
|
||||||
Params: params,
|
|
||||||
}
|
|
||||||
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
|
|
||||||
if actErr != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
|
|
||||||
}
|
|
||||||
if em != nil {
|
|
||||||
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ret.ExitCode != 0 {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
partDone()
|
|
||||||
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
|
|
||||||
|
|
||||||
if err := runCron(epoch); err != nil {
|
|
||||||
return cid.Cid{}, cid.Cid{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
partDone()
|
|
||||||
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
|
|
||||||
|
|
||||||
rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx))
|
|
||||||
for i, receipt := range receipts {
|
|
||||||
if err := rectarr.Set(uint64(i), receipt); err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rectroot, err := rectarr.Root()
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
st, err := vmi.Flush(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
|
|
||||||
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
|
|
||||||
|
|
||||||
return st, rectroot, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
 func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
 	ctx, span := trace.StartSpan(ctx, "tipSetState")
 	defer span.End()
@@ -264,7 +60,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
 		return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
 	}
 
-	st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor)
+	st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor)
 	if err != nil {
 		return cid.Undef, cid.Undef, err
 	}
@@ -273,7 +69,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
 	}
 
 func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
-	st, _, err := sm.computeTipSetState(ctx, ts, em)
+	st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em)
 	return st, err
 }
 
@@ -285,42 +81,3 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
 	}
 	return st, invocTrace, nil
 }
|
|
||||||
func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) {
|
|
||||||
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
blks := ts.Blocks()
|
|
||||||
|
|
||||||
for i := 0; i < len(blks); i++ {
|
|
||||||
for j := i + 1; j < len(blks); j++ {
|
|
||||||
if blks[i].Miner == blks[j].Miner {
|
|
||||||
return cid.Undef, cid.Undef,
|
|
||||||
xerrors.Errorf("duplicate miner in a tipset (%s %s)",
|
|
||||||
blks[i].Miner, blks[j].Miner)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var parentEpoch abi.ChainEpoch
|
|
||||||
pstate := blks[0].ParentStateRoot
|
|
||||||
if blks[0].Height > 0 {
|
|
||||||
parent, err := sm.cs.GetBlock(blks[0].Parents[0])
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
parentEpoch = parent.Height
|
|
||||||
}
|
|
||||||
|
|
||||||
r := store.NewChainRand(sm.cs, ts.Cids())
|
|
||||||
|
|
||||||
blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
|
|
||||||
if err != nil {
|
|
||||||
return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
baseFee := blks[0].ParentBaseFee
|
|
||||||
|
|
||||||
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts)
|
|
||||||
}
|
|
||||||
@@ -15,8 +15,6 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/network"
-	"github.com/filecoin-project/go-state-types/rt"
-
 	"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
 
 	"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -41,8 +39,11 @@ type MigrationCache interface {
 // - The oldState is the state produced by the upgrade epoch.
 // - The returned newState is the new state that will be used by the next epoch.
 // - The height is the upgrade epoch height (already executed).
-// - The tipset is the tipset for the last non-null block before the upgrade. Do
-//   not assume that ts.Height() is the upgrade height.
+// - The tipset is the first non-null tipset after the upgrade height (the tipset in
+//   which the upgrade is executed). Do not assume that ts.Height() is the upgrade height.
+//
+// NOTE: In StateCompute and CallWithGas, the passed tipset is actually the tipset _before_ the
+// upgrade. The tipset should really only be used for referencing the "current chain".
 type MigrationFunc func(
 	ctx context.Context,
 	sm *StateManager, cache MigrationCache,
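The clarified comment above pins down what a MigrationFunc receives and must return. As a concrete anchor, a no-op migration matching that contract looks like the ones used in the tests later in this diff (sketch only):

// A migration that changes nothing: it must still return the state root the
// next epoch should build on.
var noopMigration MigrationFunc = func(ctx context.Context, sm *StateManager, cache MigrationCache,
	cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
	return root, nil
}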
@@ -95,21 +96,6 @@ type Upgrade struct {
 
 type UpgradeSchedule []Upgrade
 
-type migrationLogger struct{}
-
-func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
-	switch level {
-	case rt.DEBUG:
-		log.Debugf(msg, args...)
-	case rt.INFO:
-		log.Infof(msg, args...)
-	case rt.WARN:
-		log.Warnf(msg, args...)
-	case rt.ERROR:
-		log.Errorf(msg, args...)
-	}
-}
-
 func (us UpgradeSchedule) Validate() error {
 	// Make sure each upgrade is valid.
 	for _, u := range us {
@@ -178,7 +164,7 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err
 	return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e)
 }
 
-func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
+func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
 	retCid := root
 	var err error
 	u := sm.stateMigrations[height]
@@ -208,7 +194,19 @@ func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, heig
 	return retCid, nil
 }
 
-func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEpoch) bool {
+// Returns true executing tipsets between the specified heights would trigger an expensive
+// migration. NOTE: migrations occurring _at_ the target height are not included, as they're
+// executed _after_ the target height.
+func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) bool {
+	for h := parent; h < height; h++ {
+		if _, ok := sm.expensiveUpgrades[h]; ok {
+			return true
+		}
+	}
+	return false
+}
+
+func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
 	_, ok := sm.expensiveUpgrades[height]
 	return ok
 }
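The range check added above is half-open: the parent height is included, the target height is not. A standalone illustration of the same semantics (illustrative only; the real method reads sm.expensiveUpgrades):

// With an expensive upgrade registered at epoch 10, walking 8 -> 10 sees no
// expensive fork (epoch 10 is excluded), while walking 8 -> 11 does.
expensive := map[abi.ChainEpoch]struct{}{10: {}}
hasBetween := func(parent, height abi.ChainEpoch) bool {
	for h := parent; h < height; h++ {
		if _, ok := expensive[h]; ok {
			return true
		}
	}
	return false
}
fmt.Println(hasBetween(8, 10)) // false
fmt.Println(hasBetween(8, 11)) // true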
@@ -316,7 +314,7 @@ func (sm *StateManager) preMigrationWorker(ctx context.Context) {
 	}
 }
 
-func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
+func DoTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
 	fromAct, err := tree.GetActor(from)
 	if err != nil {
 		return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err)
@@ -346,8 +344,8 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
 		// record the transfer in execution traces
 
 		cb(types.ExecutionTrace{
-			Msg:        makeFakeMsg(from, to, amt, 0),
-			MsgRct:     makeFakeRct(),
+			Msg:        MakeFakeMsg(from, to, amt, 0),
+			MsgRct:     MakeFakeRct(),
 			Error:      "",
 			Duration:   0,
 			GasCharges: nil,
@@ -358,7 +356,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
 	return nil
 }
 
-func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
+func TerminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
 	a, err := tree.GetActor(addr)
 	if xerrors.Is(err, types.ErrActorNotFound) {
 		return types.ErrActorNotFound
@@ -367,7 +365,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
 	}
 
 	var trace types.ExecutionTrace
-	if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
+	if err := DoTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
 		trace = t
 	}); err != nil {
 		return xerrors.Errorf("transferring terminated actor's balance: %w", err)
@@ -376,10 +374,10 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
 	if em != nil {
 		// record the transfer in execution traces
 
-		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
+		fakeMsg := MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
 
 		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
-			MessageReceipt: *makeFakeRct(),
+			MessageReceipt: *MakeFakeRct(),
 			ActorErr:       nil,
 			ExecutionTrace: trace,
 			Duration:       0,
@@ -418,7 +416,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
 	return tree.SetActor(init_.Address, ia)
 }
 
-func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
+func SetNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
 	ia, err := tree.GetActor(init_.Address)
 	if err != nil {
 		return xerrors.Errorf("getting init actor: %w", err)
@@ -445,7 +443,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
 	return nil
 }
 
-func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
+func MakeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
 	var b bytes.Buffer
 	if err := splitAddr.MarshalCBOR(&b); err != nil {
 		return address.Undef, xerrors.Errorf("marshalling split address: %w", err)
@@ -467,7 +465,7 @@ func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, erro
 	return addr, nil
 }
 
-func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
+func MakeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
 	return &types.Message{
 		From:  from,
 		To:    to,
@@ -476,7 +474,7 @@ func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount,
 	}
 }
 
-func makeFakeRct() *types.MessageReceipt {
+func MakeFakeRct() *types.MessageReceipt {
 	return &types.MessageReceipt{
 		ExitCode: 0,
 		Return:   nil,
@@ -28,6 +28,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/aerrors"
 	_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
 	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/gen"
 	. "github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -120,8 +121,8 @@ func TestForkHeightTriggers(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sm, err := NewStateManagerWithUpgradeSchedule(
-		cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
+	sm, err := NewStateManager(
+		cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
 			Network: network.Version1,
 			Height:  testForkHeight,
 			Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@@ -162,7 +163,7 @@ func TestForkHeightTriggers(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	inv := vm.NewActorRegistry()
+	inv := filcns.NewActorRegistry()
 	inv.Register(nil, testActor{})
 
 	sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
@@ -242,6 +243,19 @@ func TestForkHeightTriggers(t *testing.T) {
 func TestForkRefuseCall(t *testing.T) {
 	logging.SetAllLoggers(logging.LevelInfo)
 
+	for after := 0; after < 3; after++ {
+		for before := 0; before < 3; before++ {
+			// Makes the lints happy...
+			after := after
+			before := before
+			t.Run(fmt.Sprintf("after:%d,before:%d", after, before), func(t *testing.T) {
+				testForkRefuseCall(t, before, after)
+			})
+		}
+	}
+
+}
+func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
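The rewrite above turns TestForkRefuseCall into a table of null-round combinations. The `after := after` / `before := before` lines re-bind the loop variables so each closure captures its own copy, which matters when subtests run asynchronously and, as the in-line comment notes, keeps linters happy. A minimal illustration of the idiom:

// Each parallel subtest logs its own value rather than the final loop value.
for i := 0; i < 3; i++ {
	i := i // capture a per-iteration copy
	t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
		t.Parallel()
		t.Log(i)
	})
}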
 	ctx := context.TODO()
 
 	cg, err := gen.NewGenerator()
@@ -249,20 +263,22 @@ func TestForkRefuseCall(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sm, err := NewStateManagerWithUpgradeSchedule(
-		cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
+	var migrationCount int
+	sm, err := NewStateManager(
+		cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
 			Network:   network.Version1,
 			Expensive: true,
 			Height:    testForkHeight,
 			Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
 				root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+				migrationCount++
 				return root, nil
 			}}})
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	inv := vm.NewActorRegistry()
+	inv := filcns.NewActorRegistry()
 	inv.Register(nil, testActor{})
 
 	sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
@@ -292,14 +308,20 @@ func TestForkRefuseCall(t *testing.T) {
 		GasFeeCap:  types.NewInt(0),
 	}
 
-	for i := 0; i < 50; i++ {
-		ts, err := cg.NextTipSet()
+	nullStart := abi.ChainEpoch(testForkHeight - nullsBefore)
+	nullLength := abi.ChainEpoch(nullsBefore + nullsAfter)
+
+	for i := 0; i < testForkHeight*2; i++ {
+		pts := cg.CurTipset.TipSet()
+		skip := abi.ChainEpoch(0)
+		if pts.Height() == nullStart {
+			skip = nullLength
+		}
+		ts, err := cg.NextTipSetFromMiners(pts, cg.Miners, skip)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents())
-		require.NoError(t, err)
 		parentHeight := pts.Height()
 		currentHeight := ts.TipSet.TipSet().Height()
 
@@ -321,7 +343,20 @@ func TestForkRefuseCall(t *testing.T) {
 			require.NoError(t, err)
 			require.True(t, ret.MsgRct.ExitCode.IsSuccess())
 		}
 
+		// Calls without a tipset should walk back to the last non-fork tipset.
+		// We _verify_ that the migration wasn't run multiple times at the end of the
+		// test.
+		ret, err = sm.CallWithGas(ctx, m, nil, nil)
+		require.NoError(t, err)
+		require.True(t, ret.MsgRct.ExitCode.IsSuccess())
+
+		ret, err = sm.Call(ctx, m, nil)
+		require.NoError(t, err)
+		require.True(t, ret.MsgRct.ExitCode.IsSuccess())
 	}
+	// Make sure we didn't execute the migration multiple times.
+	require.Equal(t, migrationCount, 1)
 }
|
|
||||||
func TestForkPreMigration(t *testing.T) {
|
func TestForkPreMigration(t *testing.T) {
|
||||||
@ -364,8 +399,8 @@ func TestForkPreMigration(t *testing.T) {
|
|||||||
|
|
||||||
counter := make(chan struct{}, 10)
|
counter := make(chan struct{}, 10)
|
||||||
|
|
||||||
sm, err := NewStateManagerWithUpgradeSchedule(
|
sm, err := NewStateManager(
|
||||||
cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
|
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
|
||||||
Network: network.Version1,
|
Network: network.Version1,
|
||||||
Height: testForkHeight,
|
Height: testForkHeight,
|
||||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
|
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
|
||||||
@ -462,7 +497,7 @@ func TestForkPreMigration(t *testing.T) {
|
|||||||
require.NoError(t, sm.Stop(context.Background()))
|
require.NoError(t, sm.Stop(context.Background()))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
inv := vm.NewActorRegistry()
|
inv := filcns.NewActorRegistry()
|
||||||
inv.Register(nil, testActor{})
|
inv.Register(nil, testActor{})
|
||||||
|
|
||||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||||
@@ -2,7 +2,6 @@ package stmgr
 
 import (
 	"context"
-	"fmt"
 	"sync"
 
 	"github.com/ipfs/go-cid"
@@ -52,6 +51,11 @@ type migration struct {
 	cache *nv10.MemMigrationCache
 }
 
+type Executor interface {
+	NewActorRegistry() *vm.ActorRegistry
+	ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error)
+}
+
 type StateManager struct {
 	cs *store.ChainStore
 
@@ -75,7 +79,7 @@ type StateManager struct {
 	stlk          sync.Mutex
 	genesisMsigLk sync.Mutex
 	newVM         func(context.Context, *vm.VMOpts) (*vm.VM, error)
-	syscalls      vm.SyscallBuilder
+	Syscalls      vm.SyscallBuilder
 	preIgnitionVesting  []msig0.State
 	postIgnitionVesting []msig0.State
 	postCalicoVesting   []msig0.State
@@ -83,6 +87,7 @@ type StateManager struct {
 	genesisPledge      abi.TokenAmount
 	genesisMarketFunds abi.TokenAmount
 
+	tsExec        Executor
 	tsExecMonitor ExecMonitor
 }
 
@@ -92,15 +97,7 @@ type treeCache struct {
 	tree *state.StateTree
 }
 
-func NewStateManager(cs *store.ChainStore, sys vm.SyscallBuilder) *StateManager {
-	sm, err := NewStateManagerWithUpgradeSchedule(cs, sys, DefaultUpgradeSchedule())
-	if err != nil {
-		panic(fmt.Sprintf("default upgrade schedule is invalid: %s", err))
-	}
-	return sm
-}
-
-func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuilder, us UpgradeSchedule) (*StateManager, error) {
+func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule) (*StateManager, error) {
 	// If we have upgrades, make sure they're in-order and make sense.
 	if err := us.Validate(); err != nil {
 		return nil, err
@@ -142,8 +139,9 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuil
 		stateMigrations:   stateMigrations,
 		expensiveUpgrades: expensiveUpgrades,
 		newVM:             vm.NewVM,
-		syscalls:          sys,
+		Syscalls:          sys,
 		cs:                cs,
+		tsExec:            exec,
 		stCache:           make(map[string][]cid.Cid),
 		tCache: treeCache{
 			root: cid.Undef,
@@ -153,8 +151,8 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuil
 	}, nil
 }
 
-func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, sys vm.SyscallBuilder, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
-	sm, err := NewStateManagerWithUpgradeSchedule(cs, sys, us)
+func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
+	sm, err := NewStateManager(cs, exec, sys, us)
 	if err != nil {
 		return nil, err
 	}
@@ -344,6 +342,12 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (
 	sm.newVM = nvm
 }
 
+func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.VM, error) {
+	return func(ctx context.Context, opts *vm.VMOpts) (*vm.VM, error) {
+		return sm.newVM(ctx, opts)
+	}
+}
+
 func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version {
 	// The epochs here are the _last_ epoch for every version, or -1 if the
 	// version is disabled.
@@ -356,5 +360,5 @@ func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoc
 }
 
 func (sm *StateManager) VMSys() vm.SyscallBuilder {
-	return sm.syscalls
+	return sm.Syscalls
 }
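For readers tracking the constructor change above, here is a minimal, hypothetical sketch of how a caller wires the new NewStateManager signature. It is not code from this commit: the package name and helper are made up, the journal import path is assumed, and filcns.NewTipSetExecutor, filcns.DefaultUpgradeSchedule and filcns.Weight are simply the implementations this commit references elsewhere.

package wiring // hypothetical example, not part of this commit

import (
	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/vm"
	"github.com/filecoin-project/lotus/journal"
	dstore "github.com/ipfs/go-datastore"
)

// newStateManager shows the post-refactor call: the tipset executor and the
// upgrade schedule are now explicit arguments instead of package defaults.
func newStateManager(chainBs, stateBs bstore.Blockstore, ds dstore.Batching, sys vm.SyscallBuilder, j journal.Journal) (*stmgr.StateManager, error) {
	cs := store.NewChainStore(chainBs, stateBs, ds, filcns.Weight, j)
	return stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, filcns.DefaultUpgradeSchedule())
}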
@@ -4,8 +4,6 @@ import (
 	"context"
 	"fmt"
 	"reflect"
-	"runtime"
-	"strings"
 
 	"github.com/ipfs/go-cid"
 	cbg "github.com/whyrusleeping/cbor-gen"
@@ -14,16 +12,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
-	"github.com/filecoin-project/go-state-types/rt"
-
-	exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
-	exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
-	exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
-	exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
-	exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
 
 	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
 	"github.com/filecoin-project/lotus/chain/actors/policy"
 	"github.com/filecoin-project/lotus/chain/state"
@@ -33,86 +22,21 @@ import (
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
-type MethodMeta struct {
-	Name string
-
-	Params reflect.Type
-	Ret    reflect.Type
-}
-
-var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
-
-func init() {
-	// TODO: combine with the runtime actor registry.
-	var actors []rt.VMActor
-	actors = append(actors, exported0.BuiltinActors()...)
-	actors = append(actors, exported2.BuiltinActors()...)
-	actors = append(actors, exported3.BuiltinActors()...)
-	actors = append(actors, exported4.BuiltinActors()...)
-	actors = append(actors, exported5.BuiltinActors()...)
-
-	for _, actor := range actors {
-		exports := actor.Exports()
-		methods := make(map[abi.MethodNum]MethodMeta, len(exports))
-
-		// Explicitly add send, it's special.
-		methods[builtin.MethodSend] = MethodMeta{
-			Name:   "Send",
-			Params: reflect.TypeOf(new(abi.EmptyValue)),
-			Ret:    reflect.TypeOf(new(abi.EmptyValue)),
-		}
-
-		// Iterate over exported methods. Some of these _may_ be nil and
-		// must be skipped.
-		for number, export := range exports {
-			if export == nil {
-				continue
-			}
-
-			ev := reflect.ValueOf(export)
-			et := ev.Type()
-
-			// Extract the method names using reflection. These
-			// method names always match the field names in the
-			// `builtin.Method*` structs (tested in the specs-actors
-			// tests).
-			fnName := runtime.FuncForPC(ev.Pointer()).Name()
-			fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
-
-			switch abi.MethodNum(number) {
-			case builtin.MethodSend:
-				panic("method 0 is reserved for Send")
-			case builtin.MethodConstructor:
-				if fnName != "Constructor" {
-					panic("method 1 is reserved for Constructor")
-				}
-			}
-
-			methods[abi.MethodNum(number)] = MethodMeta{
-				Name:   fnName,
-				Params: et.In(1),
-				Ret:    et.Out(0),
-			}
-		}
-		MethodsMap[actor.Code()] = methods
-	}
-}
-
 func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
 	act, err := sm.LoadActor(ctx, to, ts)
 	if err != nil {
 		return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
 	}
 
-	m, found := MethodsMap[act.Code][method]
+	m, found := sm.tsExec.NewActorRegistry().Methods[act.Code][method]
 	if !found {
 		return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
 	}
 	return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
 }
 
-func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
-	m, found := MethodsMap[actCode][method]
+func GetParamType(ar *vm.ActorRegistry, actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
+	m, found := ar.Methods[actCode][method]
 	if !found {
 		return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode)
 	}
@@ -143,13 +67,14 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
 	}
 
 	for i := ts.Height(); i < height; i++ {
-		// handle state forks
-		base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
+		// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
+		base, err = sm.HandleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
 		if err != nil {
 			return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
 		}
 
-		// TODO: should we also run cron here?
+		// We intentionally don't run cron here, as we may be trying to look into the
+		// future. It's not guaranteed to be accurate... but that's fine.
 	}
 
 	r := store.NewChainRand(sm.cs, ts.Cids())
@@ -158,7 +83,8 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
 		Epoch:          height,
 		Rand:           r,
 		Bstore:         sm.cs.StateBlockstore(),
-		Syscalls:       sm.syscalls,
+		Actors:         sm.tsExec.NewActorRegistry(),
+		Syscalls:       sm.Syscalls,
 		CircSupplyCalc: sm.GetVMCirculatingSupply,
 		NtwkVersion:    sm.GetNtwkVersion,
 		BaseFee:        ts.Blocks()[0].ParentBaseFee,
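Because the package-level MethodsMap is gone, parameter decoding now goes through an explicit actor registry. A small hypothetical sketch of the new call shape, using only the functions that appear in the hunks above:

package wiring // hypothetical example, not part of this commit

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
)

// paramDecoder resolves the CBOR type of a method's parameters from an
// explicit registry instead of the removed package-level MethodsMap.
func paramDecoder(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
	ar := filcns.NewActorRegistry() // the same registry the Executor exposes to the StateManager
	return stmgr.GetParamType(ar, actCode, method)
}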
@@ -7,6 +7,7 @@ import (
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/blockstore"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/gen"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types/mock"
@@ -31,7 +32,7 @@ func TestIndexSeeks(t *testing.T) {
 	ctx := context.TODO()
 
 	nbs := blockstore.NewMemorySync()
-	cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil)
+	cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck
 
 	_, err = cs.Import(bytes.NewReader(gencar))
@@ -101,10 +101,11 @@ type BlockMessages struct {
 	Miner         address.Address
 	BlsMessages   []types.ChainMsg
 	SecpkMessages []types.ChainMsg
-	WinCount      int64
 }
 
 func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
+	// returned BlockMessages match block order in tipset
+
 	applied := make(map[address.Address]uint64)
 
 	cst := cbor.NewCborStore(cs.stateBlockstore)
@@ -150,7 +151,6 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err
 			Miner:         b.Miner,
 			BlsMessages:   make([]types.ChainMsg, 0, len(bms)),
 			SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
-			WinCount:      b.ElectionProof.WinCount,
 		}
 
 		for _, bmsg := range bms {
@@ -31,7 +31,6 @@ import (
 	lru "github.com/hashicorp/golang-lru"
 	block "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
-	"github.com/ipfs/go-datastore"
 	dstore "github.com/ipfs/go-datastore"
 	"github.com/ipfs/go-datastore/query"
 	cbor "github.com/ipfs/go-ipld-cbor"
@@ -88,6 +87,8 @@ type HeadChangeEvt struct {
 	ApplyCount int
 }
 
+type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error)
+
 // ChainStore is the main point of access to chain data.
 //
 // Raw chain data is stored in the Blockstore, with relevant markers (genesis,
@@ -102,6 +103,8 @@ type ChainStore struct {
 	stateBlockstore bstore.Blockstore
 	metadataDs      dstore.Batching
 
+	weight WeightFunc
+
 	chainLocalBlockstore bstore.Blockstore
 
 	heaviestLk sync.RWMutex
@@ -129,7 +132,7 @@ type ChainStore struct {
 	wg sync.WaitGroup
 }
 
-func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, j journal.Journal) *ChainStore {
+func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
 	c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
 	tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
 	if j == nil {
@@ -144,6 +147,7 @@ func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dsto
 		chainBlockstore:      chainBs,
 		stateBlockstore:      stateBs,
 		chainLocalBlockstore: localbs,
+		weight:               weight,
 		metadataDs:           ds,
 		bestTips:             pubsub.New(64),
 		tipsets:              make(map[abi.ChainEpoch][]cid.Cid),
@@ -294,27 +298,36 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
 	}}
 
 	go func() {
-		defer close(out)
-		var unsubOnce sync.Once
+		defer func() {
+			// Tell the caller we're done first, the following may block for a bit.
+			close(out)
+
+			// Unsubscribe.
+			cs.bestTips.Unsub(subch)
+
+			// Drain the channel.
+			for range subch {
+			}
+		}()
 
 		for {
 			select {
 			case val, ok := <-subch:
 				if !ok {
-					log.Warn("chain head sub exit loop")
+					// Shutting down.
+					return
+				}
+
+				select {
+				case out <- val.([]*api.HeadChange):
+				default:
+					log.Errorf("closing head change subscription due to slow reader")
 					return
 				}
 				if len(out) > 5 {
 					log.Warnf("head change sub is slow, has %d buffered entries", len(out))
 				}
-				select {
-				case out <- val.([]*api.HeadChange):
-				case <-ctx.Done():
-				}
 			case <-ctx.Done():
-				unsubOnce.Do(func() {
-					go cs.bestTips.Unsub(subch)
-				})
+				return
 			}
 		}
 	}()
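The reworked SubHeadChanges goroutine above follows a close/unsubscribe/drain teardown and drops slow readers instead of blocking the publisher. A stripped-down sketch of the same pattern follows; the names (fanOut, sub, unsub) are illustrative and not Lotus APIs, and unsub is expected to close sub, as the pubsub Unsub call above does.

package wiring // hypothetical example, not part of this commit

func fanOut(out chan<- int, sub <-chan int, unsub func()) {
	go func() {
		defer func() {
			close(out)      // tell the consumer we're done first
			unsub()         // then unsubscribe (this closes sub)
			for range sub { // and drain whatever was still buffered
			}
		}()

		for v := range sub {
			select {
			case out <- v:
			default:
				// slow reader: drop the subscription rather than block the publisher
				return
			}
		}
	}()
}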
@@ -402,11 +415,11 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
 	}
 
 	defer cs.heaviestLk.Unlock()
-	w, err := cs.Weight(ctx, ts)
+	w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
 	if err != nil {
 		return err
 	}
-	heaviestW, err := cs.Weight(ctx, cs.heaviest)
+	heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
 	if err != nil {
 		return err
 	}
@@ -642,7 +655,7 @@ func (cs *ChainStore) FlushValidationCache() error {
 	return FlushValidationCache(cs.metadataDs)
 }
 
-func FlushValidationCache(ds datastore.Batching) error {
+func FlushValidationCache(ds dstore.Batching) error {
 	log.Infof("clearing block validation cache...")
 
 	dsWalk, err := ds.Query(query.Query{
@@ -674,7 +687,7 @@ func FlushValidationCache(ds datastore.Batching) error {
 	for _, k := range allKeys {
 		if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
 			delCnt++
-			batch.Delete(datastore.RawKey(k.Key)) // nolint:errcheck
+			batch.Delete(dstore.RawKey(k.Key)) // nolint:errcheck
 		}
 	}
 
@@ -1148,3 +1161,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
 
 	return cs.LoadTipSet(lbts.Parents())
 }
+
+func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove
+	return cs.weight(ctx, cs.StateBlockstore(), hts)
+}
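NewChainStore now takes the weight function explicitly, so alternative consensus implementations and tests can inject their own. Production call sites in this commit pass filcns.Weight; the height-as-weight function below is purely illustrative, and the package and helper names are hypothetical.

package wiring // hypothetical example, not part of this commit

import (
	"context"

	"github.com/ipfs/go-datastore"
	syncds "github.com/ipfs/go-datastore/sync"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

func newTestChainStore() *store.ChainStore {
	bs := bstore.NewMemorySync()
	// toy weight: just the tipset height, so "heavier" means "later"
	w := func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
		return types.NewInt(uint64(ts.Height())), nil
	}
	return store.NewChainStore(bs, bs, syncds.MutexWrap(datastore.NewMapDatastore()), w, nil)
}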
@@ -13,6 +13,7 @@ import (
 
 	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/gen"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
@@ -70,7 +71,7 @@ func BenchmarkGetRandomness(b *testing.B) {
 		b.Fatal(err)
 	}
 
-	cs := store.NewChainStore(bs, bs, mds, nil)
+	cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck
 
 	b.ResetTimer()
@@ -105,7 +106,7 @@ func TestChainExportImport(t *testing.T) {
 	}
 
 	nbs := blockstore.NewMemory()
-	cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil)
+	cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck
 
 	root, err := cs.Import(buf)
@@ -140,7 +141,7 @@ func TestChainExportImportFull(t *testing.T) {
 	}
 
 	nbs := blockstore.NewMemory()
-	cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil)
+	cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck
 
 	root, err := cs.Import(buf)
@@ -157,7 +158,11 @@ func TestChainExportImportFull(t *testing.T) {
 		t.Fatal("imported chain differed from exported chain")
 	}
 
-	sm := stmgr.NewStateManager(cs, nil)
+	sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule())
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	for i := 0; i < 100; i++ {
 		ts, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(i), nil, false)
 		if err != nil {
@@ -2,32 +2,26 @@ package sub
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"time"
 
 	address "github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain"
+	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/messagepool"
-	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/metrics"
 	"github.com/filecoin-project/lotus/node/impl/client"
-	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
 	lru "github.com/hashicorp/golang-lru"
 	blocks "github.com/ipfs/go-block-format"
 	bserv "github.com/ipfs/go-blockservice"
 	"github.com/ipfs/go-cid"
-	cbor "github.com/ipfs/go-ipld-cbor"
 	logging "github.com/ipfs/go-log/v2"
 	connmgr "github.com/libp2p/go-libp2p-core/connmgr"
 	"github.com/libp2p/go-libp2p-core/peer"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
-	cbg "github.com/whyrusleeping/cbor-gen"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/tag"
 	"golang.org/x/xerrors"
@@ -35,9 +29,6 @@ import (
 
 var log = logging.Logger("sub")
 
-var ErrSoftFailure = errors.New("soft validation failure")
-var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
-
 var msgCidPrefix = cid.Prefix{
 	Version: 1,
 	Codec:   cid.DagCBOR,
@@ -225,11 +216,11 @@ type BlockValidator struct {
 	blacklist func(peer.ID)
 
 	// necessary for block validation
 	chain *store.ChainStore
-	stmgr *stmgr.StateManager
+	consensus consensus.Consensus
 }
 
-func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator {
+func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Consensus, blacklist func(peer.ID)) *BlockValidator {
 	p, _ := lru.New2Q(4096)
 	return &BlockValidator{
 		self: self,
@@ -238,7 +229,7 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.State
 		blacklist:  blacklist,
 		recvBlocks: newBlockReceiptCache(),
 		chain:      chain,
-		stmgr:      stmgr,
+		consensus:  cns,
 	}
 }
 
@@ -260,214 +251,35 @@ func (bv *BlockValidator) flagPeer(p peer.ID) {
 	bv.peers.Add(p, v.(int)+1)
 }
 
-func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
-	if pid == bv.self {
-		return bv.validateLocalBlock(ctx, msg)
-	}
-
-	// track validation time
-	begin := build.Clock.Now()
+func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
 	defer func() {
-		log.Debugf("block validation time: %s", build.Clock.Since(begin))
+		if rerr := recover(); rerr != nil {
+			err := xerrors.Errorf("validate block: %s", rerr)
+			recordFailure(ctx, metrics.BlockValidationFailure, err.Error())
+			bv.flagPeer(pid)
+			res = pubsub.ValidationReject
+			return
+		}
 	}()
 
-	stats.Record(ctx, metrics.BlockReceived.M(1))
-
-	recordFailureFlagPeer := func(what string) {
+	var what string
+	res, what = bv.consensus.ValidateBlockPubsub(ctx, pid == bv.self, msg)
+	if res == pubsub.ValidationAccept {
+		// it's a good block! make sure we've only seen it once
+		if count := bv.recvBlocks.add(msg.ValidatorData.(*types.BlockMsg).Cid()); count > 0 {
+			if pid == bv.self {
+				log.Warnf("local block has been seen %d times; ignoring", count)
+			}
+
+			// TODO: once these changes propagate to the network, we can consider
+			// dropping peers who send us the same block multiple times
+			return pubsub.ValidationIgnore
+		}
+	} else {
 		recordFailure(ctx, metrics.BlockValidationFailure, what)
-		bv.flagPeer(pid)
 	}
 
-	blk, what, err := bv.decodeAndCheckBlock(msg)
-	if err != nil {
-		log.Error("got invalid block over pubsub: ", err)
-		recordFailureFlagPeer(what)
-		return pubsub.ValidationReject
-	}
-
-	// validate the block meta: the Message CID in the header must match the included messages
-	err = bv.validateMsgMeta(ctx, blk)
-	if err != nil {
-		log.Warnf("error validating message metadata: %s", err)
-		recordFailureFlagPeer("invalid_block_meta")
-		return pubsub.ValidationReject
-	}
-
-	// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
-	// to prevent spam attacks.
-	// the logic works as follows: we lookup the miner in the chain for its key.
-	// if we can find it then it's a known miner and we can validate the signature.
-	// if we can't find it, we check whether we are (near) synced in the chain.
-	// if we are not synced we cannot validate the block and we must ignore it.
-	// if we are synced and the miner is unknown, then the block is rejcected.
-	key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header)
-	if err != nil {
-		if err != ErrSoftFailure && bv.isChainNearSynced() {
-			log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
-			recordFailureFlagPeer("unknown_miner")
-			return pubsub.ValidationReject
-		}
-
-		log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
-		return pubsub.ValidationIgnore
-	}
-
-	err = sigs.CheckBlockSignature(ctx, blk.Header, key)
-	if err != nil {
-		log.Errorf("block signature verification failed: %s", err)
-		recordFailureFlagPeer("signature_verification_failed")
-		return pubsub.ValidationReject
-	}
-
-	if blk.Header.ElectionProof.WinCount < 1 {
-		log.Errorf("block is not claiming to be winning")
-		recordFailureFlagPeer("not_winning")
-		return pubsub.ValidationReject
-	}
-
-	// it's a good block! make sure we've only seen it once
-	if bv.recvBlocks.add(blk.Header.Cid()) > 0 {
-		// TODO: once these changes propagate to the network, we can consider
-		// dropping peers who send us the same block multiple times
-		return pubsub.ValidationIgnore
-	}
-
-	// all good, accept the block
-	msg.ValidatorData = blk
-	stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
-	return pubsub.ValidationAccept
-}
-
-func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
-	stats.Record(ctx, metrics.BlockPublished.M(1))
-
-	if size := msg.Size(); size > 1<<20-1<<15 {
-		log.Errorf("ignoring oversize block (%dB)", size)
-		recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block")
-		return pubsub.ValidationIgnore
-	}
-
-	blk, what, err := bv.decodeAndCheckBlock(msg)
-	if err != nil {
-		log.Errorf("got invalid local block: %s", err)
-		recordFailure(ctx, metrics.BlockValidationFailure, what)
-		return pubsub.ValidationIgnore
-	}
-
-	if count := bv.recvBlocks.add(blk.Header.Cid()); count > 0 {
-		log.Warnf("local block has been seen %d times; ignoring", count)
-		return pubsub.ValidationIgnore
-	}
-
-	msg.ValidatorData = blk
-	stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
-	return pubsub.ValidationAccept
-}
-
-func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
-	blk, err := types.DecodeBlockMsg(msg.GetData())
-	if err != nil {
-		return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
-	}
-
-	if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
-		return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
-	}
-
-	// make sure we have a signature
-	if blk.Header.BlockSig == nil {
-		return nil, "missing_signature", fmt.Errorf("block without a signature")
-	}
-
-	return blk, "", nil
-}
-
-func (bv *BlockValidator) isChainNearSynced() bool {
-	ts := bv.chain.GetHeaviestTipSet()
-	timestamp := ts.MinTimestamp()
-	timestampTime := time.Unix(int64(timestamp), 0)
-	return build.Clock.Since(timestampTime) < 6*time.Hour
-}
-
-func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
-	// TODO there has to be a simpler way to do this without the blockstore dance
-	// block headers use adt0
-	store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory()))
-	bmArr := blockadt.MakeEmptyArray(store)
-	smArr := blockadt.MakeEmptyArray(store)
-
-	for i, m := range msg.BlsMessages {
-		c := cbg.CborCid(m)
-		if err := bmArr.Set(uint64(i), &c); err != nil {
-			return err
-		}
-	}
-
-	for i, m := range msg.SecpkMessages {
-		c := cbg.CborCid(m)
-		if err := smArr.Set(uint64(i), &c); err != nil {
-			return err
-		}
-	}
-
-	bmroot, err := bmArr.Root()
-	if err != nil {
-		return err
-	}
-
-	smroot, err := smArr.Root()
-	if err != nil {
-		return err
-	}
-
-	mrcid, err := store.Put(store.Context(), &types.MsgMeta{
-		BlsMessages:   bmroot,
-		SecpkMessages: smroot,
-	})
-
-	if err != nil {
-		return err
-	}
-
-	if msg.Header.Messages != mrcid {
-		return fmt.Errorf("messages didn't match root cid in header")
-	}
-
-	return nil
-}
-
-func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
-	// we check that the miner met the minimum power at the lookback tipset
-
-	baseTs := bv.chain.GetHeaviestTipSet()
-	lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
-	if err != nil {
-		log.Warnf("failed to load lookback tipset for incoming block: %s", err)
-		return address.Undef, ErrSoftFailure
-	}
-
-	key, err := stmgr.GetMinerWorkerRaw(ctx, bv.stmgr, lbst, bh.Miner)
-	if err != nil {
-		log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err)
-		return address.Undef, ErrSoftFailure
-	}
-
-	// NOTE: we check to see if the miner was eligible in the lookback
-	// tipset - 1 for historical reasons. DO NOT use the lookback state
-	// returned by GetLookbackTipSetForRound.
-
-	eligible, err := stmgr.MinerEligibleToMine(ctx, bv.stmgr, bh.Miner, baseTs, lbts)
-	if err != nil {
-		log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
-		return address.Undef, ErrSoftFailure
-	}
-
-	if !eligible {
-		log.Warnf("incoming block's miner is ineligible")
-		return address.Undef, ErrInsufficientPower
-	}
-
-	return key, nil
+	return res
 }
 
 type blockReceiptCache struct {
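After this change the pubsub block validator only de-duplicates blocks and records metrics; everything Filecoin-specific sits behind consensus.Consensus. A minimal sketch of that delegation shape, where the wrapper type and package are hypothetical and only ValidateBlockPubsub (shown in the hunk above) is assumed:

package wiring // hypothetical example, not part of this commit

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/lotus/chain/consensus"
)

type blockSub struct {
	self peer.ID
	cns  consensus.Consensus
}

// validate forwards the heavy lifting to the consensus implementation and
// only maps its verdict back onto the pubsub result.
func (b *blockSub) validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
	res, _ := b.cns.ValidateBlockPubsub(ctx, pid == b.self, msg)
	return res
}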
chain/sync.go (689 changed lines)
@@ -5,13 +5,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"os"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 
-	"github.com/filecoin-project/lotus/chain/actors/builtin"
+	"github.com/filecoin-project/lotus/chain/consensus"
 
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 
@@ -29,40 +27,23 @@ import (
 	"go.opencensus.io/trace"
 	"golang.org/x/xerrors"
 
-	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/crypto"
-	"github.com/filecoin-project/go-state-types/network"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-
-	ffi "github.com/filecoin-project/filecoin-ffi"
-
 	// named msgarray here to make it clear that these are the types used by
 	// messages, regardless of specs-actors version.
 	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
 
-	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
-
 	"github.com/filecoin-project/lotus/api"
 	bstore "github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
 	"github.com/filecoin-project/lotus/chain/beacon"
 	"github.com/filecoin-project/lotus/chain/exchange"
-	"github.com/filecoin-project/lotus/chain/gen"
-	"github.com/filecoin-project/lotus/chain/state"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
-	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/metrics"
 )
 
-// Blocks that are more than MaxHeightDrift epochs above
-// the theoretical max height based on systime are quickly rejected
-const MaxHeightDrift = 5
-
 var (
 	// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
 	// where the Syncer publishes candidate chain heads to be synced.
@@ -108,6 +89,8 @@ type Syncer struct {
 	// the state manager handles making state queries
 	sm *stmgr.StateManager
 
+	consensus consensus.Consensus
+
 	// The known Genesis tipset
 	Genesis *types.TipSet
 
@@ -127,8 +110,6 @@ type Syncer struct {
 
 	receiptTracker *blockReceiptTracker
 
-	verifier ffiwrapper.Verifier
-
 	tickerCtxCancel context.CancelFunc
 
 	ds dtypes.MetadataDS
@@ -136,40 +117,44 @@ type Syncer struct {
 
 type SyncManagerCtor func(syncFn SyncFunc) SyncManager
 
-// NewSyncer creates a new Syncer object.
-func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
+type Genesis *types.TipSet
+
+func LoadGenesis(sm *stmgr.StateManager) (Genesis, error) {
 	gen, err := sm.ChainStore().GetGenesis()
 	if err != nil {
 		return nil, xerrors.Errorf("getting genesis block: %w", err)
 	}
 
-	gent, err := types.NewTipSet([]*types.BlockHeader{gen})
-	if err != nil {
-		return nil, err
-	}
+	return types.NewTipSet([]*types.BlockHeader{gen})
+}
+
+// NewSyncer creates a new Syncer object.
+func NewSyncer(ds dtypes.MetadataDS,
+	sm *stmgr.StateManager,
+	exchange exchange.Client,
+	syncMgrCtor SyncManagerCtor,
+	connmgr connmgr.ConnManager,
+	self peer.ID,
+	beacon beacon.Schedule,
+	gent Genesis,
+	consensus consensus.Consensus) (*Syncer, error) {
 
 	s := &Syncer{
 		ds:             ds,
 		beacon:         beacon,
 		bad:            NewBadBlockCache(),
 		Genesis:        gent,
+		consensus:      consensus,
 		Exchange:       exchange,
 		store:          sm.ChainStore(),
 		sm:             sm,
 		self:           self,
 		receiptTracker: newBlockReceiptTracker(),
 		connmgr:        connmgr,
-		verifier:       verifier,
 
 		incoming: pubsub.New(50),
 	}
 
-	if build.InsecurePoStValidation {
-		log.Warn("*********************************************************************************************")
-		log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
-		log.Warn("*********************************************************************************************")
-	}
-
 	s.syncmgr = syncMgrCtor(s.Sync)
 	return s, nil
 }
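Genesis loading is now a separate, injectable step and the Syncer takes a consensus.Consensus instead of a proof verifier. A hypothetical sketch of the new wiring; how the consensus value is built (for example by the filcns package) is outside this hunk, so it stays an abstract parameter here.

package wiring // hypothetical example, not part of this commit

import (
	"github.com/libp2p/go-libp2p-core/connmgr"
	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/beacon"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/exchange"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

func newSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, ex exchange.Client, ctor chain.SyncManagerCtor,
	cm connmgr.ConnManager, self peer.ID, b beacon.Schedule, cns consensus.Consensus) (*chain.Syncer, error) {
	gen, err := chain.LoadGenesis(sm) // replaces the genesis lookup NewSyncer used to do itself
	if err != nil {
		return nil, err
	}
	return chain.NewSyncer(ds, sm, ex, ctor, cm, self, b, gen, cns)
}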
@@ -212,7 +197,7 @@ func (syncer *Syncer) Stop() {
 func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
 	defer func() {
 		if err := recover(); err != nil {
-			log.Errorf("panic in InformNewHead: ", err)
+			log.Errorf("panic in InformNewHead: %s", err)
 		}
 	}()
 
@@ -222,7 +207,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
 		return false
 	}
 
-	if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
+	if syncer.consensus.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
 		log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
 		return false
 	}
@@ -399,33 +384,21 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
 		return nil, fmt.Errorf("msgincl length didnt match tipset size")
 	}
 
+	if err := checkMsgMeta(ts, allbmsgs, allsmsgs, bmi, smi); err != nil {
+		return nil, err
+	}
+
 	fts := &store.FullTipSet{}
 	for bi, b := range ts.Blocks() {
-		if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
-			return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
-		}
-
 		var smsgs []*types.SignedMessage
-		var smsgCids []cid.Cid
 		for _, m := range smi[bi] {
 			smsgs = append(smsgs, allsmsgs[m])
-			smsgCids = append(smsgCids, allsmsgs[m].Cid())
 		}
 
 		var bmsgs []*types.Message
-		var bmsgCids []cid.Cid
 		for _, m := range bmi[bi] {
 			bmsgs = append(bmsgs, allbmsgs[m])
-			bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
-		}
-
-		mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids)
-		if err != nil {
-			return nil, err
-		}
-
-		if b.Messages != mrcid {
-			return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
 		}
 
 		fb := &types.FullBlock{
@@ -584,7 +557,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
 }
 
 func isPermanent(err error) bool {
-	return !errors.Is(err, ErrTemporal)
+	return !errors.Is(err, consensus.ErrTemporal)
 }
 
 func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
@@ -624,55 +597,6 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet,
 	return nil
 }
 
-func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
-	act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs)
-	if err != nil {
-		return xerrors.Errorf("failed to load power actor: %w", err)
-	}
-
-	powState, err := power.Load(syncer.store.ActorStore(ctx), act)
-	if err != nil {
-		return xerrors.Errorf("failed to load power actor state: %w", err)
-	}
-
-	_, exist, err := powState.MinerPower(maddr)
-	if err != nil {
-		return xerrors.Errorf("failed to look up miner's claim: %w", err)
-	}
-
-	if !exist {
-		return xerrors.New("miner isn't valid")
-	}
-
-	return nil
-}
-
-var ErrTemporal = errors.New("temporal error")
-
-func blockSanityChecks(h *types.BlockHeader) error {
-	if h.ElectionProof == nil {
-		return xerrors.Errorf("block cannot have nil election proof")
-	}
-
-	if h.Ticket == nil {
-		return xerrors.Errorf("block cannot have nil ticket")
-	}
-
-	if h.BlockSig == nil {
-		return xerrors.Errorf("block had nil signature")
-	}
-
-	if h.BLSAggregate == nil {
-		return xerrors.Errorf("block had nil bls aggregate signature")
-	}
-
-	if h.Miner.Protocol() != address.ID {
-		return xerrors.Errorf("block had non-ID miner address")
-	}
-
-	return nil
-}
-
 // ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
 func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
 	defer func() {
@@ -703,262 +627,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
 	ctx, span := trace.StartSpan(ctx, "validateBlock")
 	defer span.End()
 
-	if err := blockSanityChecks(b.Header); err != nil {
-		return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
+	if err := syncer.consensus.ValidateBlock(ctx, b); err != nil {
+		return err
 	}
-
-	h := b.Header
-
-	baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
-	if err != nil {
-		return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
-	}
-
-	winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())
-
-	lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
-	if err != nil {
-		return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
-	}
-
-	prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
-	if err != nil {
-		return xerrors.Errorf("failed to get latest beacon entry: %w", err)
-	}
-
-	// fast checks first
-
-	if h.Height <= baseTs.Height() {
-		return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
-	}
-
-	nulls := h.Height - (baseTs.Height() + 1)
-	if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
-		return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
-	}
-
-	now := uint64(build.Clock.Now().Unix())
-	if h.Timestamp > now+build.AllowableClockDriftSecs {
-		return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
-	}
-	if h.Timestamp > now {
-		log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
-	}
-
-	msgsCheck := async.Err(func() error {
-		if b.Cid() == build.WhitelistedBlock {
-			return nil
-		}
-
-		if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
-			return xerrors.Errorf("block had invalid messages: %w", err)
-		}
-		return nil
-	})
-
-	minerCheck := async.Err(func() error {
-		if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil {
-			return xerrors.Errorf("minerIsValid failed: %w", err)
-		}
-		return nil
-	})
-
-	baseFeeCheck := async.Err(func() error {
-		baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs)
-		if err != nil {
-			return xerrors.Errorf("computing base fee: %w", err)
-		}
-		if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
-			return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
-				b.Header.ParentBaseFee, baseFee)
-		}
-		return nil
-	})
-
-	pweight, err := syncer.store.Weight(ctx, baseTs)
-	if err != nil {
-		return xerrors.Errorf("getting parent weight: %w", err)
-	}
-
-	if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
-		return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
-			b.Header.ParentWeight, pweight)
-	}
-
-	stateRootCheck := async.Err(func() error {
-		stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
-		if err != nil {
-			return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
-		}
-
-		if stateroot != h.ParentStateRoot {
-			msgs, err := syncer.store.MessagesForTipset(baseTs)
-			if err != nil {
-				log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
-			} else {
-				log.Warn("Messages for tipset with mismatching state:")
-				for i, m := range msgs {
-					mm := m.VMMessage()
-					log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
-				}
-			}
-
-			return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
-		}
-
-		if precp != h.ParentMessageReceipts {
-			return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
-		}
-
-		return nil
-	})
-
-	// Stuff that needs worker address
-	waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
-	if err != nil {
-		return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
-	}
-
-	winnerCheck := async.Err(func() error {
-		if h.ElectionProof.WinCount < 1 {
-			return xerrors.Errorf("block is not claiming to be a winner")
-		}
-
-		eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts)
-		if err != nil {
-			return xerrors.Errorf("determining if miner has min power failed: %w", err)
-		}
-
-		if !eligible {
-			return xerrors.New("block's miner is ineligible to mine")
-		}
-
-		rBeacon := *prevBeacon
-		if len(h.BeaconEntries) != 0 {
-			rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
-		}
-		buf := new(bytes.Buffer)
-		if err := h.Miner.MarshalCBOR(buf); err != nil {
-			return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
-		}
-
-		vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
-		if err != nil {
-			return xerrors.Errorf("could not draw randomness: %w", err)
-		}
-
-		if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
-			return xerrors.Errorf("validating block election proof failed: %w", err)
-		}
-
-		slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner)
-		if err != nil {
-			return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
-		}
-
-		if slashed {
-			return xerrors.Errorf("received block was from slashed or invalid miner")
-		}
-
-		mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
-		if err != nil {
-			return xerrors.Errorf("failed getting power: %w", err)
-		}
-
-		j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
-		if h.ElectionProof.WinCount != j {
-			return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
blockSigCheck := async.Err(func() error {
|
|
||||||
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
|
|
||||||
return xerrors.Errorf("check block signature failed: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
beaconValuesCheck := async.Err(func() error {
|
|
||||||
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
|
|
||||||
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
tktsCheck := async.Err(func() error {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if err := h.Miner.MarshalCBOR(buf); err != nil {
|
|
||||||
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.Height > build.UpgradeSmokeHeight {
|
|
||||||
buf.Write(baseTs.MinTicket().VRFProof)
|
|
||||||
}
|
|
||||||
|
|
||||||
beaconBase := *prevBeacon
|
|
||||||
if len(h.BeaconEntries) != 0 {
|
|
||||||
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("validating block tickets failed: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
wproofCheck := async.Err(func() error {
|
|
||||||
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
|
|
||||||
return xerrors.Errorf("invalid election post: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
await := []async.ErrorFuture{
|
|
||||||
minerCheck,
|
|
||||||
tktsCheck,
|
|
||||||
blockSigCheck,
|
|
||||||
beaconValuesCheck,
|
|
||||||
wproofCheck,
|
|
||||||
winnerCheck,
|
|
||||||
msgsCheck,
|
|
||||||
baseFeeCheck,
|
|
||||||
stateRootCheck,
|
|
||||||
}
|
|
||||||
|
|
||||||
var merr error
|
|
||||||
for _, fut := range await {
|
|
||||||
if err := fut.AwaitContext(ctx); err != nil {
|
|
||||||
merr = multierror.Append(merr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if merr != nil {
|
|
||||||
mulErr := merr.(*multierror.Error)
|
|
||||||
mulErr.ErrorFormat = func(es []error) string {
|
|
||||||
if len(es) == 1 {
|
|
||||||
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
points := make([]string, len(es))
|
|
||||||
for i, err := range es {
|
|
||||||
points[i] = fmt.Sprintf("* %+v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"%d errors occurred:\n\t%s\n\n",
|
|
||||||
len(es), strings.Join(points, "\n\t"))
|
|
||||||
}
|
|
||||||
return mulErr
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if useCache {
|
if useCache {
|
||||||
@ -970,249 +640,6 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
|
|
||||||
if build.InsecurePoStValidation {
|
|
||||||
if len(h.WinPoStProof) == 0 {
|
|
||||||
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if err := h.Miner.MarshalCBOR(buf); err != nil {
|
|
||||||
return xerrors.Errorf("failed to marshal miner address: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rbase := prevBeacon
|
|
||||||
if len(h.BeaconEntries) > 0 {
|
|
||||||
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mid, err := address.IDFromAddress(h.Miner)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("getting winning post sector set: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
|
|
||||||
Randomness: rand,
|
|
||||||
Proofs: h.WinPoStProof,
|
|
||||||
ChallengedSectors: sectors,
|
|
||||||
Prover: abi.ActorID(mid),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to verify election post: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
|
|
||||||
return xerrors.Errorf("winning post was invalid")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
|
|
||||||
func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
|
|
||||||
{
|
|
||||||
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
|
|
||||||
var pubks [][]byte
|
|
||||||
|
|
||||||
for _, m := range b.BlsMessages {
|
|
||||||
sigCids = append(sigCids, m.Cid())
|
|
||||||
|
|
||||||
pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
pubks = append(pubks, pubk)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
|
|
||||||
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nonces := make(map[address.Address]uint64)
|
|
||||||
|
|
||||||
stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to load base state tree: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
|
|
||||||
pl := vm.PricelistByEpoch(baseTs.Height())
|
|
||||||
var sumGasLimit int64
|
|
||||||
checkMsg := func(msg types.ChainMsg) error {
|
|
||||||
m := msg.VMMessage()
|
|
||||||
|
|
||||||
// Phase 1: syntactic validation, as defined in the spec
|
|
||||||
minGas := pl.OnChainMessage(msg.ChainLength())
|
|
||||||
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
|
|
||||||
// So below is overflow safe
|
|
||||||
sumGasLimit += m.GasLimit
|
|
||||||
if sumGasLimit > build.BlockGasLimit {
|
|
||||||
return xerrors.Errorf("block gas limit exceeded")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 2: (Partial) semantic validation:
|
|
||||||
// the sender exists and is an account actor, and the nonces make sense
|
|
||||||
var sender address.Address
|
|
||||||
if nv >= network.Version13 {
|
|
||||||
sender, err = st.LookupID(m.From)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
sender = m.From
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := nonces[sender]; !ok {
|
|
||||||
// `GetActor` does not validate that this is an account actor.
|
|
||||||
act, err := st.GetActor(sender)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to get actor: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !builtin.IsAccountActor(act.Code) {
|
|
||||||
return xerrors.New("Sender must be an account actor")
|
|
||||||
}
|
|
||||||
nonces[sender] = act.Nonce
|
|
||||||
}
|
|
||||||
|
|
||||||
if nonces[sender] != m.Nonce {
|
|
||||||
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
|
|
||||||
}
|
|
||||||
nonces[sender]++
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate message arrays in a temporary blockstore.
|
|
||||||
tmpbs := bstore.NewMemory()
|
|
||||||
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
|
|
||||||
|
|
||||||
bmArr := blockadt.MakeEmptyArray(tmpstore)
|
|
||||||
for i, m := range b.BlsMessages {
|
|
||||||
if err := checkMsg(m); err != nil {
|
|
||||||
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := store.PutMessage(tmpbs, m)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
k := cbg.CborCid(c)
|
|
||||||
if err := bmArr.Set(uint64(i), &k); err != nil {
|
|
||||||
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
smArr := blockadt.MakeEmptyArray(tmpstore)
|
|
||||||
for i, m := range b.SecpkMessages {
|
|
||||||
if err := checkMsg(m); err != nil {
|
|
||||||
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
|
|
||||||
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
|
|
||||||
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to resolve key addr: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
|
|
||||||
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := store.PutMessage(tmpbs, m)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
|
|
||||||
}
|
|
||||||
k := cbg.CborCid(c)
|
|
||||||
if err := smArr.Set(uint64(i), &k); err != nil {
|
|
||||||
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bmroot, err := bmArr.Root()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
smroot, err := smArr.Root()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
|
|
||||||
BlsMessages: bmroot,
|
|
||||||
SecpkMessages: smroot,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.Header.Messages != mrcid {
|
|
||||||
return fmt.Errorf("messages didnt match message root in header")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finally, flush.
|
|
||||||
return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
|
|
||||||
_, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate")
|
|
||||||
defer span.End()
|
|
||||||
span.AddAttributes(
|
|
||||||
trace.Int64Attribute("msgCount", int64(len(msgs))),
|
|
||||||
)
|
|
||||||
|
|
||||||
msgsS := make([]ffi.Message, len(msgs))
|
|
||||||
pubksS := make([]ffi.PublicKey, len(msgs))
|
|
||||||
for i := 0; i < len(msgs); i++ {
|
|
||||||
msgsS[i] = msgs[i].Bytes()
|
|
||||||
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
|
|
||||||
}
|
|
||||||
|
|
||||||
sigS := new(ffi.Signature)
|
|
||||||
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
|
|
||||||
|
|
||||||
if len(msgs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
valid := ffi.HashVerify(sigS, msgsS, pubksS)
|
|
||||||
if !valid {
|
|
||||||
return xerrors.New("bls aggregate signature failed to verify")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type syncStateKey struct{}
|
type syncStateKey struct{}
|
||||||
|
|
||||||
func extractSyncState(ctx context.Context) *SyncerState {
|
func extractSyncState(ctx context.Context) *SyncerState {
|
||||||
@ -1374,7 +801,7 @@ loop:
|
|||||||
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
|
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
|
||||||
blockSet[len(blockSet)-1].Height(), blks[0].Height())
|
blockSet[len(blockSet)-1].Height(), blks[0].Height())
|
||||||
// A successful `GetBlocks()` call is guaranteed to fetch at least
|
// A successful `GetBlocks()` call is guaranteed to fetch at least
|
||||||
// one tipset so the acess `blks[0]` is safe.
|
// one tipset so the access `blks[0]` is safe.
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, b := range blks {
|
for _, b := range blks {
|
||||||
@ -1598,6 +1025,35 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func checkMsgMeta(ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) error {
|
||||||
|
for bi, b := range ts.Blocks() {
|
||||||
|
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
|
||||||
|
return fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
|
||||||
|
}
|
||||||
|
|
||||||
|
var smsgCids []cid.Cid
|
||||||
|
for _, m := range smi[bi] {
|
||||||
|
smsgCids = append(smsgCids, allsmsgs[m].Cid())
|
||||||
|
}
|
||||||
|
|
||||||
|
var bmsgCids []cid.Cid
|
||||||
|
for _, m := range bmi[bi] {
|
||||||
|
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
|
||||||
|
}
|
||||||
|
|
||||||
|
mrcid, err := computeMsgMeta(cbor.NewCborStore(bstore.NewMemory()), bmsgCids, smsgCids)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Messages != mrcid {
|
||||||
|
return fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
|
func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
|
||||||
batchSize := len(headers)
|
batchSize := len(headers)
|
||||||
batch := make([]*exchange.CompactedMessages, batchSize)
|
batch := make([]*exchange.CompactedMessages, batchSize)
|
||||||
@ -1636,7 +1092,19 @@ func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
requestErr = multierror.Append(requestErr, err)
|
requestErr = multierror.Append(requestErr, err)
|
||||||
} else {
|
} else {
|
||||||
requestResult = result
|
isGood := true
|
||||||
|
for index, ts := range headers[nextI:lastI] {
|
||||||
|
cm := result[index]
|
||||||
|
if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil {
|
||||||
|
log.Errorf("fetched messages not as expected: %s", err)
|
||||||
|
isGood = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGood {
|
||||||
|
requestResult = result
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
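The checkMsgMeta guard added above recomputes, for every block in a fetched tipset, the message-root CID implied by the peer's response and rejects the batch when it disagrees with the header's Messages field, so corrupted or malicious exchange responses never reach the local store. The sketch below shows what that recomputation amounts to, using the same AMT/MsgMeta helpers the removed checkBlockMessages relied on; the function name is illustrative and computeMsgMeta in the actual code is the real equivalent. It assumes the aliases already imported in this file (blockadt, bstore, cbor, cbg, cid, types).

```go
// Illustrative sketch only: recompute the message-root CID for one block.
// The header is acceptable only if b.Messages equals the returned CID.
func msgMetaCid(ctx context.Context, blsCids, secpkCids []cid.Cid) (cid.Cid, error) {
	// A throwaway in-memory store is enough; only the root CID matters.
	tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))

	bmArr := blockadt.MakeEmptyArray(tmpstore)
	for i, c := range blsCids {
		k := cbg.CborCid(c)
		if err := bmArr.Set(uint64(i), &k); err != nil {
			return cid.Undef, err
		}
	}

	smArr := blockadt.MakeEmptyArray(tmpstore)
	for i, c := range secpkCids {
		k := cbg.CborCid(c)
		if err := smArr.Set(uint64(i), &k); err != nil {
			return cid.Undef, err
		}
	}

	bmroot, err := bmArr.Root()
	if err != nil {
		return cid.Undef, err
	}
	smroot, err := smArr.Root()
	if err != nil {
		return cid.Undef, err
	}

	// MsgMeta ties the two roots together; its CID is what block headers commit to.
	return tmpstore.Put(ctx, &types.MsgMeta{BlsMessages: bmroot, SecpkMessages: smroot})
}
```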
@@ -1754,10 +1222,6 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t
 return nil
 }

-func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
-return gen.VerifyVRF(ctx, worker, rand, evrf)
-}
-
 func (syncer *Syncer) State() []SyncerStateSnapshot {
 return syncer.syncmgr.State()
 }
@@ -1802,12 +1266,3 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)

 return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
 }
-
-func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
-if syncer.Genesis == nil {
-return false
-}
-
-now := uint64(build.Clock.Now().Unix())
-return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
-}
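The removed IsEpochBeyondCurrMax helper bounded how far ahead of wall-clock time a claimed chain epoch may be: the ceiling is the number of block delays elapsed since genesis plus a small drift allowance. A compact restatement of that arithmetic, a sketch only, assuming a genesis timestamp in seconds and the same constants referenced above:

```go
// Sketch of the removed bound. MaxHeightDrift is the syncer's drift allowance
// (a package-level constant); build.BlockDelaySecs is 30 seconds on mainnet.
func maxPlausibleEpoch(genesisTs uint64) abi.ChainEpoch {
	now := uint64(build.Clock.Now().Unix())
	return abi.ChainEpoch((now-genesisTs)/build.BlockDelaySecs) + MaxHeightDrift
}
```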
@@ -28,6 +28,7 @@ import (
 "github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/actors/policy"
+"github.com/filecoin-project/lotus/chain/consensus/filcns"
 "github.com/filecoin-project/lotus/chain/gen"
 "github.com/filecoin-project/lotus/chain/gen/slashfilter"
 "github.com/filecoin-project/lotus/chain/store"
@@ -105,7 +106,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {

 mn: mocknet.New(ctx),
 g: g,
-us: stmgr.DefaultUpgradeSchedule(),
+us: filcns.DefaultUpgradeSchedule(),
 }

 tu.addSourceNode(h)
@@ -125,19 +126,19 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn
 // prepare for upgrade.
 Network: network.Version9,
 Height: 1,
-Migration: stmgr.UpgradeActorsV2,
+Migration: filcns.UpgradeActorsV2,
 }, {
 Network: network.Version10,
 Height: 2,
-Migration: stmgr.UpgradeActorsV3,
+Migration: filcns.UpgradeActorsV3,
 }, {
 Network: network.Version12,
 Height: 3,
-Migration: stmgr.UpgradeActorsV4,
+Migration: filcns.UpgradeActorsV4,
 }, {
 Network: network.Version13,
 Height: v5height,
-Migration: stmgr.UpgradeActorsV5,
+Migration: filcns.UpgradeActorsV5,
 }}

 g, err := gen.NewGeneratorWithUpgradeSchedule(sched)
@@ -47,7 +47,8 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
 }

 type BlockHeader struct {
 Miner address.Address // 0 unique per block/miner

 Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
 ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
 BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset
@@ -4,7 +4,6 @@ import (
 "bytes"
 "fmt"
 "math/big"
-"os"
 "testing"

 "github.com/stretchr/testify/assert"
@@ -129,17 +128,25 @@ func BenchmarkWinCounts(b *testing.B) {
 }

 func TestWinCounts(t *testing.T) {
-t.SkipNow()
 totalPower := NewInt(100)
-power := NewInt(30)
+power := NewInt(20)

-f, _ := os.Create("output.wins")
-fmt.Fprintf(f, "wins\n")
-ep := &ElectionProof{VRFProof: nil}
-for i := uint64(0); i < 1000000; i++ {
-i := i + 1000000
-ep.VRFProof = []byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(i >> 32)}
-j := ep.ComputeWinCount(power, totalPower)
-fmt.Fprintf(f, "%d\n", j)
+count := uint64(1000000)
+total := uint64(0)
+ep := &ElectionProof{VRFProof: make([]byte, 5)}
+for i := uint64(0); i < count; i++ {
+w := i + count
+ep.VRFProof[0] = byte(w)
+ep.VRFProof[1] = byte(w >> 8)
+ep.VRFProof[2] = byte(w >> 16)
+ep.VRFProof[3] = byte(w >> 24)
+ep.VRFProof[4] = byte(w >> 32)
+
+total += uint64(ep.ComputeWinCount(power, totalPower))
 }
+// We have 1/5 of the power, so we expect to win 1 block per epoch on average. Plus or minus
+// 1%.
+avgWins := float64(total) / float64(count)
+assert.GreaterOrEqual(t, avgWins, 1.0-0.01)
+assert.LessOrEqual(t, avgWins, 1.0+0.01)
 }
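The rewritten TestWinCounts asserts an expectation instead of dumping samples to a file: ComputeWinCount draws from a distribution whose mean is the miner's power share times the expected number of leaders per epoch (builtin.ExpectedLeadersPerEpoch, 5 on Filecoin), so with 20 of 100 units of power the average should be 5 × 20/100 = 1 win per epoch, which the ±1% band checks over a million sampled VRF proofs. A tiny standalone restatement of that arithmetic (the function name is illustrative only):

```go
// expectedWinsPerEpoch restates the mean the test asserts:
// the miner's power share times the expected leaders per epoch.
func expectedWinsPerEpoch(power, totalPower, leadersPerEpoch float64) float64 {
	return leadersPerEpoch * power / totalPower
}

// expectedWinsPerEpoch(20, 100, 5) == 1.0, matching the 1.0 ± 0.01 band asserted above.
```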
@@ -1,4 +1,5 @@
-//+build gofuzz
+//go:build gofuzz
+// +build gofuzz

 package types

@@ -26,7 +26,6 @@ func LoadVector(t *testing.T, f string, out interface{}) {
 }

 func TestBlockHeaderVectors(t *testing.T) {
-t.Skip("we need to regenerate for beacon")
 var headers []HeaderVector
 LoadVector(t, "block_headers.json", &headers)

@@ -65,8 +64,6 @@ func TestMessageSigningVectors(t *testing.T) {
 }

 func TestUnsignedMessageVectors(t *testing.T) {
-t.Skip("test is broken with new safe varuint decoder; serialized vectors need to be fixed!")
-
 var msvs []UnsignedMessageVector
 LoadVector(t, "unsigned_messages.json", &msvs)

@@ -5,6 +5,8 @@ import (
 "encoding/hex"
 "fmt"
 "reflect"
+"runtime"
+"strings"

 "github.com/filecoin-project/go-state-types/network"

@@ -14,11 +16,6 @@ import (
 cbg "github.com/whyrusleeping/cbor-gen"
 "golang.org/x/xerrors"

-exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
-exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
-exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
-exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
-exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
 vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime"

 "github.com/filecoin-project/go-state-types/abi"
@@ -30,8 +27,17 @@ import (
 "github.com/filecoin-project/lotus/chain/types"
 )

+type MethodMeta struct {
+Name string
+
+Params reflect.Type
+Ret reflect.Type
+}
+
 type ActorRegistry struct {
 actors map[cid.Cid]*actorInfo

+Methods map[cid.Cid]map[abi.MethodNum]MethodMeta
 }

 // An ActorPredicate returns an error if the given actor is not valid for the given runtime environment (e.g., chain height, version, etc.).
@@ -61,18 +67,10 @@ type actorInfo struct {
 }

 func NewActorRegistry() *ActorRegistry {
-inv := &ActorRegistry{actors: make(map[cid.Cid]*actorInfo)}
-// TODO: define all these properties on the actors themselves, in specs-actors.
-// add builtInCode using: register(cid, singleton)
-inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
-inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
-inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
-inv.Register(ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
-inv.Register(ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
-
-return inv
+return &ActorRegistry{
+actors: make(map[cid.Cid]*actorInfo),
+Methods: map[cid.Cid]map[abi.MethodNum]MethodMeta{},
+}
 }

 func (ar *ActorRegistry) Invoke(codeCid cid.Cid, rt vmr.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
@@ -96,6 +94,7 @@ func (ar *ActorRegistry) Register(pred ActorPredicate, actors ...rtt.VMActor) {
 pred = func(vmr.Runtime, rtt.VMActor) error { return nil }
 }
 for _, a := range actors {
+// register in the `actors` map (for the invoker)
 code, err := ar.transform(a)
 if err != nil {
 panic(xerrors.Errorf("%s: %w", string(a.Code().Hash()), err))
@@ -105,6 +104,51 @@ func (ar *ActorRegistry) Register(pred ActorPredicate, actors ...rtt.VMActor) {
 vmActor: a,
 predicate: pred,
 }
+
+// register in the `Methods` map (used by statemanager utils)
+exports := a.Exports()
+methods := make(map[abi.MethodNum]MethodMeta, len(exports))
+
+// Explicitly add send, it's special.
+methods[builtin.MethodSend] = MethodMeta{
+Name: "Send",
+Params: reflect.TypeOf(new(abi.EmptyValue)),
+Ret: reflect.TypeOf(new(abi.EmptyValue)),
+}
+
+// Iterate over exported methods. Some of these _may_ be nil and
+// must be skipped.
+for number, export := range exports {
+if export == nil {
+continue
+}
+
+ev := reflect.ValueOf(export)
+et := ev.Type()
+
+// Extract the method names using reflection. These
+// method names always match the field names in the
+// `builtin.Method*` structs (tested in the specs-actors
+// tests).
+fnName := runtime.FuncForPC(ev.Pointer()).Name()
+fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
+
+switch abi.MethodNum(number) {
+case builtin.MethodSend:
+panic("method 0 is reserved for Send")
+case builtin.MethodConstructor:
+if fnName != "Constructor" {
+panic("method 1 is reserved for Constructor")
+}
+}
+
+methods[abi.MethodNum(number)] = MethodMeta{
+Name: fnName,
+Params: et.In(1),
+Ret: et.Out(0),
+}
+}
+ar.Methods[a.Code()] = methods
 }
 }

|
|||||||
return um.UnmarshalCBOR(bytes.NewReader(b))
|
return um.UnmarshalCBOR(bytes.NewReader(b))
|
||||||
}
|
}
|
||||||
|
|
||||||
func DumpActorState(act *types.Actor, b []byte) (interface{}, error) {
|
func DumpActorState(i *ActorRegistry, act *types.Actor, b []byte) (interface{}, error) {
|
||||||
if builtin.IsAccountActor(act.Code) { // Account code special case
|
if builtin.IsAccountActor(act.Code) { // Account code special case
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
i := NewActorRegistry()
|
|
||||||
|
|
||||||
actInfo, ok := i.actors[act.Code]
|
actInfo, ok := i.actors[act.Code]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, xerrors.Errorf("state type for actor %s not found", act.Code)
|
return nil, xerrors.Errorf("state type for actor %s not found", act.Code)
|
||||||
|
@ -223,6 +223,7 @@ type VMOpts struct {
|
|||||||
Epoch abi.ChainEpoch
|
Epoch abi.ChainEpoch
|
||||||
Rand Rand
|
Rand Rand
|
||||||
Bstore blockstore.Blockstore
|
Bstore blockstore.Blockstore
|
||||||
|
Actors *ActorRegistry
|
||||||
Syscalls SyscallBuilder
|
Syscalls SyscallBuilder
|
||||||
CircSupplyCalc CircSupplyCalculator
|
CircSupplyCalc CircSupplyCalculator
|
||||||
NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
|
NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
|
||||||
@ -244,7 +245,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
|
|||||||
cst: cst,
|
cst: cst,
|
||||||
buf: buf,
|
buf: buf,
|
||||||
blockHeight: opts.Epoch,
|
blockHeight: opts.Epoch,
|
||||||
areg: NewActorRegistry(),
|
areg: opts.Actors,
|
||||||
rand: opts.Rand, // TODO: Probably should be a syscall
|
rand: opts.Rand, // TODO: Probably should be a syscall
|
||||||
circSupplyCalc: opts.CircSupplyCalc,
|
circSupplyCalc: opts.CircSupplyCalc,
|
||||||
ntwkVersion: opts.NtwkVersion,
|
ntwkVersion: opts.NtwkVersion,
|
||||||
|
@ -35,7 +35,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/api/v0api"
|
"github.com/filecoin-project/lotus/api/v0api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/actors"
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
types "github.com/filecoin-project/lotus/chain/types"
|
types "github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -476,7 +476,7 @@ var ChainInspectUsage = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
mm := stmgr.MethodsMap[code][m.Message.Method]
|
mm := filcns.NewActorRegistry().Methods[code][m.Message.Method] // TODO: use remote map
|
||||||
|
|
||||||
byMethod[mm.Name] += m.Message.GasLimit
|
byMethod[mm.Name] += m.Message.GasLimit
|
||||||
byMethodC[mm.Name]++
|
byMethodC[mm.Name]++
|
||||||
|
51
cli/log.go
51
cli/log.go
@ -2,7 +2,9 @@ package cli
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fatih/color"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
@ -13,6 +15,7 @@ var LogCmd = &cli.Command{
|
|||||||
Subcommands: []*cli.Command{
|
Subcommands: []*cli.Command{
|
||||||
LogList,
|
LogList,
|
||||||
LogSetLevel,
|
LogSetLevel,
|
||||||
|
LogAlerts,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -100,3 +103,51 @@ var LogSetLevel = &cli.Command{
|
|||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var LogAlerts = &cli.Command{
|
||||||
|
Name: "alerts",
|
||||||
|
Usage: "Get alert states",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "all",
|
||||||
|
Usage: "get all (active and inactive) alerts",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
api, closer, err := GetAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer closer()
|
||||||
|
|
||||||
|
ctx := ReqContext(cctx)
|
||||||
|
|
||||||
|
alerts, err := api.LogAlerts(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting alerts: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
all := cctx.Bool("all")
|
||||||
|
|
||||||
|
for _, alert := range alerts {
|
||||||
|
if !all && !alert.Active {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
active := color.RedString("active ")
|
||||||
|
if !alert.Active {
|
||||||
|
active = color.GreenString("inactive")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("%s %s:%s\n", active, alert.Type.System, alert.Type.Subsystem)
|
||||||
|
if alert.LastResolved != nil {
|
||||||
|
fmt.Printf(" last resolved at %s; reason: %s\n", alert.LastResolved.Time.Truncate(time.Millisecond), alert.LastResolved.Message)
|
||||||
|
}
|
||||||
|
if alert.LastActive != nil {
|
||||||
|
fmt.Printf(" %s %s; reason: %s\n", color.YellowString("last raised at"), alert.LastActive.Time.Truncate(time.Millisecond), alert.LastActive.Message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
@ -10,10 +10,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors"
|
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
@ -31,8 +27,11 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/lotus/blockstore"
|
"github.com/filecoin-project/lotus/blockstore"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
||||||
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -325,7 +324,7 @@ var msigInspectCmd = &cli.Command{
|
|||||||
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr)
|
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
method := stmgr.MethodsMap[targAct.Code][tx.Method]
|
method := filcns.NewActorRegistry().Methods[targAct.Code][tx.Method] // TODO: use remote map
|
||||||
|
|
||||||
if decParams && tx.Method != 0 {
|
if decParams && tx.Method != 0 {
|
||||||
ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
|
ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
|
||||||
|
@ -12,7 +12,7 @@ import (
|
|||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
types "github.com/filecoin-project/lotus/chain/types"
|
types "github.com/filecoin-project/lotus/chain/types"
|
||||||
cid "github.com/ipfs/go-cid"
|
cid "github.com/ipfs/go-cid"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
@ -86,7 +86,7 @@ func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
methodMeta, found := stmgr.MethodsMap[act.Code][method]
|
methodMeta, found := filcns.NewActorRegistry().Methods[act.Code][method] // TODO: use remote map
|
||||||
if !found {
|
if !found {
|
||||||
return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
|
return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
|
||||||
}
|
}
|
||||||
|
@ -18,6 +18,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api/v0api"
|
"github.com/filecoin-project/lotus/api/v0api"
|
||||||
|
|
||||||
@ -1366,7 +1367,7 @@ func codeStr(c cid.Cid) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getMethod(code cid.Cid, method abi.MethodNum) string {
|
func getMethod(code cid.Cid, method abi.MethodNum) string {
|
||||||
return stmgr.MethodsMap[code][method].Name
|
return filcns.NewActorRegistry().Methods[code][method].Name // todo: use remote
|
||||||
}
|
}
|
||||||
|
|
||||||
func toFil(f types.BigInt) types.FIL {
|
func toFil(f types.BigInt) types.FIL {
|
||||||
@ -1397,7 +1398,7 @@ func sumGas(changes []*types.GasTrace) types.GasTrace {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
|
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
|
||||||
p, err := stmgr.GetParamType(code, method)
|
p, err := stmgr.GetParamType(filcns.NewActorRegistry(), code, method) // todo use api for correct actor registry
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -1411,7 +1412,7 @@ func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
|
func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
|
||||||
methodMeta, found := stmgr.MethodsMap[code][method]
|
methodMeta, found := filcns.NewActorRegistry().Methods[code][method] // TODO: use remote
|
||||||
if !found {
|
if !found {
|
||||||
return "", fmt.Errorf("method %d not found on actor %s", method, code)
|
return "", fmt.Errorf("method %d not found on actor %s", method, code)
|
||||||
}
|
}
|
||||||
|
@ -26,6 +26,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/blockstore"
|
"github.com/filecoin-project/lotus/blockstore"
|
||||||
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
|
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
|
||||||
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
@ -253,10 +254,13 @@ var importBenchCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
|
|
||||||
metadataDs := datastore.NewMapDatastore()
|
metadataDs := datastore.NewMapDatastore()
|
||||||
cs := store.NewChainStore(bs, bs, metadataDs, nil)
|
cs := store.NewChainStore(bs, bs, metadataDs, filcns.Weight, nil)
|
||||||
defer cs.Close() //nolint:errcheck
|
defer cs.Close() //nolint:errcheck
|
||||||
|
|
||||||
stm := stmgr.NewStateManager(cs, vm.Syscalls(verifier))
|
stm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
var carFile *os.File
|
var carFile *os.File
|
||||||
// open the CAR file if one is provided.
|
// open the CAR file if one is provided.
|
||||||
|
@ -12,26 +12,30 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
|
"github.com/mattn/go-isatty"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/lotus/api/v0api"
|
"github.com/filecoin-project/lotus/api/v0api"
|
||||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/blockstore"
|
"github.com/filecoin-project/lotus/blockstore"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
|
"github.com/filecoin-project/lotus/journal/alerting"
|
||||||
)
|
)
|
||||||
|
|
||||||
var infoCmd = &cli.Command{
|
var infoCmd = &cli.Command{
|
||||||
@ -45,6 +49,10 @@ var infoCmd = &cli.Command{
|
|||||||
Name: "hide-sectors-info",
|
Name: "hide-sectors-info",
|
||||||
Usage: "hide sectors info",
|
Usage: "hide sectors info",
|
||||||
},
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "blocks",
|
||||||
|
Usage: "Log of produced <blocks> newest blocks and rewards(Miner Fee excluded)",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Action: infoCmdAct,
|
Action: infoCmdAct,
|
||||||
}
|
}
|
||||||
@ -116,6 +124,21 @@ func infoCmdAct(cctx *cli.Context) error {
|
|||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
|
alerts, err := minerApi.LogAlerts(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting alerts: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
activeAlerts := make([]alerting.Alert, 0)
|
||||||
|
for _, alert := range alerts {
|
||||||
|
if alert.Active {
|
||||||
|
activeAlerts = append(activeAlerts, alert)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(activeAlerts) > 0 {
|
||||||
|
fmt.Printf("%s (check %s)\n", color.RedString("⚠ %d Active alerts", len(activeAlerts)), color.YellowString("lotus-miner log alerts"))
|
||||||
|
}
|
||||||
|
|
||||||
err = handleMiningInfo(ctx, cctx, fullapi, minerApi)
|
err = handleMiningInfo(ctx, cctx, fullapi, minerApi)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -141,6 +164,7 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
}
|
}
|
||||||
|
|
||||||
tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullapi), blockstore.NewMemory())
|
tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullapi), blockstore.NewMemory())
|
||||||
|
|
||||||
mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
|
mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -148,6 +172,7 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
|
|
||||||
// Sector size
|
// Sector size
|
||||||
mi, err := fullapi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
mi, err := fullapi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -178,6 +203,7 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
secCounts, err := fullapi.StateMinerSectorCount(ctx, maddr, types.EmptyTSK)
|
secCounts, err := fullapi.StateMinerSectorCount(ctx, maddr, types.EmptyTSK)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -275,6 +301,7 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
colorTokenAmount(" Available: %s\n", availBalance)
|
colorTokenAmount(" Available: %s\n", availBalance)
|
||||||
|
|
||||||
mb, err := fullapi.StateMarketBalance(ctx, maddr, types.EmptyTSK)
|
mb, err := fullapi.StateMarketBalance(ctx, maddr, types.EmptyTSK)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("getting market balance: %w", err)
|
return xerrors.Errorf("getting market balance: %w", err)
|
||||||
}
|
}
|
||||||
@ -285,6 +312,7 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked))
|
colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked))
|
||||||
|
|
||||||
wb, err := fullapi.WalletBalance(ctx, mi.Worker)
|
wb, err := fullapi.WalletBalance(ctx, mi.Worker)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("getting worker balance: %w", err)
|
return xerrors.Errorf("getting worker balance: %w", err)
|
||||||
}
|
}
|
||||||
@ -315,6 +343,13 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cctx.IsSet("blocks") {
|
||||||
|
fmt.Println("Produced newest blocks:")
|
||||||
|
err = producedBlocks(ctx, cctx.Int("blocks"), maddr, fullapi)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
// TODO: grab actr state / info
|
// TODO: grab actr state / info
|
||||||
// * Sealed sectors (count / bytes)
|
// * Sealed sectors (count / bytes)
|
||||||
// * Power
|
// * Power
|
||||||
@ -484,8 +519,8 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func sectorsInfo(ctx context.Context, napi api.StorageMiner) error {
|
func sectorsInfo(ctx context.Context, mapi api.StorageMiner) error {
|
||||||
summary, err := napi.SectorsSummary(ctx)
|
summary, err := mapi.SectorsSummary(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -523,3 +558,66 @@ func colorTokenAmount(format string, amount abi.TokenAmount) {
|
|||||||
color.Red(format, types.FIL(amount).Short())
|
color.Red(format, types.FIL(amount).Short())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func producedBlocks(ctx context.Context, count int, maddr address.Address, napi v0api.FullNode) error {
|
||||||
|
var err error
|
||||||
|
head, err := napi.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(napi), blockstore.NewMemory())
|
||||||
|
|
||||||
|
tty := isatty.IsTerminal(os.Stderr.Fd())
|
||||||
|
|
||||||
|
ts := head
|
||||||
|
fmt.Printf(" Epoch | Block ID | Reward\n")
|
||||||
|
for count > 0 {
|
||||||
|
tsk := ts.Key()
|
||||||
|
bhs := ts.Blocks()
|
||||||
|
for _, bh := range bhs {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if bh.Miner == maddr {
|
||||||
|
if tty {
|
||||||
|
_, _ = fmt.Fprint(os.Stderr, "\r\x1b[0K")
|
||||||
|
}
|
||||||
|
|
||||||
|
rewardActor, err := napi.StateGetActor(ctx, reward.Address, tsk)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rewardActorState, err := reward.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), rewardActor)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blockReward, err := rewardActorState.ThisEpochReward()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
minerReward := types.BigDiv(types.BigMul(types.NewInt(uint64(bh.ElectionProof.WinCount)),
|
||||||
|
blockReward), types.NewInt(uint64(builtin.ExpectedLeadersPerEpoch)))
|
||||||
|
|
||||||
|
fmt.Printf("%8d | %s | %s\n", ts.Height(), bh.Cid(), types.FIL(minerReward))
|
||||||
|
count--
|
||||||
|
} else if tty && bh.Height%120 == 0 {
|
||||||
|
_, _ = fmt.Fprintf(os.Stderr, "\r\x1b[0KChecking epoch %s", lcli.EpochTime(head.Height(), bh.Height))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tsk = ts.Parents()
|
||||||
|
ts, err = napi.ChainGetTipSet(ctx, tsk)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tty {
|
||||||
|
_, _ = fmt.Fprint(os.Stderr, "\r\x1b[0K")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
@ -51,6 +51,7 @@ import (
|
|||||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||||
"github.com/filecoin-project/lotus/genesis"
|
"github.com/filecoin-project/lotus/genesis"
|
||||||
"github.com/filecoin-project/lotus/journal"
|
"github.com/filecoin-project/lotus/journal"
|
||||||
|
"github.com/filecoin-project/lotus/journal/fsjournal"
|
||||||
storageminer "github.com/filecoin-project/lotus/miner"
|
storageminer "github.com/filecoin-project/lotus/miner"
|
||||||
"github.com/filecoin-project/lotus/node/modules"
|
"github.com/filecoin-project/lotus/node/modules"
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
@ -479,7 +480,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
j, err := journal.OpenFSJournal(lr, journal.EnvDisabledEvents())
|
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to open filesystem journal: %w", err)
|
return fmt.Errorf("failed to open filesystem journal: %w", err)
|
||||||
}
|
}
|
||||||
|
@@ -223,6 +223,12 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi
 		}
 	} else {
 		log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
+		// setting empty config to allow miner to be started
+		if err := lr.SetStorage(func(sc *stores.StorageConfig) {
+			sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{})
+		}); err != nil {
+			return xerrors.Errorf("set storage config: %w", err)
+		}
 	}
 
 	log.Info("Restoring metadata backup")
@@ -101,17 +101,33 @@ var sealingWorkersCmd = &cli.Command{
 
 		ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical)
 		ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical)
-		ramBar := color.YellowString(strings.Repeat("|", ramBarsRes)) +
+		ramRepeatSpace := int(barCols) - (ramBarsUsed + ramBarsRes)
+
+		colorFunc := color.YellowString
+		if ramRepeatSpace < 0 {
+			ramRepeatSpace = 0
+			colorFunc = color.RedString
+		}
+
+		ramBar := colorFunc(strings.Repeat("|", ramBarsRes)) +
 			color.GreenString(strings.Repeat("|", ramBarsUsed)) +
-			strings.Repeat(" ", int(barCols)-ramBarsUsed-ramBarsRes)
+			strings.Repeat(" ", ramRepeatSpace)
 
 		vmem := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
 
 		vmemBarsRes := int(stat.Info.Resources.MemReserved * barCols / vmem)
 		vmemBarsUsed := int(stat.MemUsedMax * barCols / vmem)
-		vmemBar := color.YellowString(strings.Repeat("|", vmemBarsRes)) +
+		vmemRepeatSpace := int(barCols) - (vmemBarsUsed + vmemBarsRes)
+
+		colorFunc = color.YellowString
+		if vmemRepeatSpace < 0 {
+			vmemRepeatSpace = 0
+			colorFunc = color.RedString
+		}
+
+		vmemBar := colorFunc(strings.Repeat("|", vmemBarsRes)) +
 			color.GreenString(strings.Repeat("|", vmemBarsUsed)) +
-			strings.Repeat(" ", int(barCols)-vmemBarsUsed-vmemBarsRes)
+			strings.Repeat(" ", vmemRepeatSpace)
 
 		fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar,
 			(stat.Info.Resources.MemReserved+stat.MemUsedMin)*100/stat.Info.Resources.MemPhysical,
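Side note (not from the patch): the new *RepeatSpace variables matter because strings.Repeat panics on a negative count, which can happen when reserved plus used memory exceeds the physical or virtual total; the change clamps the padding to zero and flips the reserved segment to red instead. A small self-contained sketch of the same guard:

package main

import (
	"fmt"
	"strings"
)

// bar renders a fixed-width usage bar; pad is clamped so strings.Repeat
// never receives a negative count when used+reserved overflows cols.
func bar(used, reserved, cols int) string {
	pad := cols - (used + reserved)
	if pad < 0 {
		pad = 0 // over-committed: drop the padding instead of panicking
	}
	return strings.Repeat("|", reserved) + strings.Repeat("|", used) + strings.Repeat(" ", pad)
}

func main() {
	fmt.Printf("[%s]\n", bar(40, 40, 64)) // used+reserved > cols, still renders safely
}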
@@ -3,6 +3,7 @@ package main
 import (
 	"bufio"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
 	"sort"
@@ -44,6 +45,7 @@ var sectorsCmd = &cli.Command{
 		sectorsUpdateCmd,
 		sectorsPledgeCmd,
 		sectorsCheckExpireCmd,
+		sectorsExpiredCmd,
 		sectorsRenewCmd,
 		sectorsExtendCmd,
 		sectorsTerminateCmd,
@@ -84,12 +86,23 @@ var sectorsStatusCmd = &cli.Command{
 	ArgsUsage: "<sectorNum>",
 	Flags: []cli.Flag{
 		&cli.BoolFlag{
 			Name: "log",
 			Usage: "display event log",
+			Aliases: []string{"l"},
 		},
 		&cli.BoolFlag{
 			Name: "on-chain-info",
 			Usage: "show sector on chain info",
+			Aliases: []string{"c"},
+		},
+		&cli.BoolFlag{
+			Name: "partition-info",
+			Usage: "show partition related info",
+			Aliases: []string{"p"},
+		},
+		&cli.BoolFlag{
+			Name: "proof",
+			Usage: "print snark proof bytes as hex",
 		},
 	},
 	Action: func(cctx *cli.Context) error {
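For reference (invocation assumed from the command tree above; the sector number is a placeholder), the new flags are used as:

	lotus-miner sectors status --log 1000             # or -l
	lotus-miner sectors status --on-chain-info 1000   # or -c
	lotus-miner sectors status --partition-info 1000  # or -p
	lotus-miner sectors status --proof 1000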
@@ -125,7 +138,9 @@ var sectorsStatusCmd = &cli.Command{
 		fmt.Printf("SeedH:\t\t%d\n", status.Seed.Epoch)
 		fmt.Printf("Precommit:\t%s\n", status.PreCommitMsg)
 		fmt.Printf("Commit:\t\t%s\n", status.CommitMsg)
-		fmt.Printf("Proof:\t\t%x\n", status.Proof)
+		if cctx.Bool("proof") {
+			fmt.Printf("Proof:\t\t%x\n", status.Proof)
+		}
 		fmt.Printf("Deals:\t\t%v\n", status.Deals)
 		fmt.Printf("Retries:\t%d\n", status.Retries)
 		if status.LastErr != "" {
@@ -145,6 +160,93 @@ var sectorsStatusCmd = &cli.Command{
 			fmt.Printf("Early:\t\t%v\n", status.Early)
 		}
 
+		if cctx.Bool("partition-info") {
+			fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx)
+			if err != nil {
+				return err
+			}
+			defer nCloser()
+
+			maddr, err := getActorAddress(ctx, cctx)
+			if err != nil {
+				return err
+			}
+
+			mact, err := fullApi.StateGetActor(ctx, maddr, types.EmptyTSK)
+			if err != nil {
+				return err
+			}
+
+			tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory())
+			mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
+			if err != nil {
+				return err
+			}
+
+			errFound := errors.New("found")
+			if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
+				return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
+					pas, err := part.AllSectors()
+					if err != nil {
+						return err
+					}
+
+					set, err := pas.IsSet(id)
+					if err != nil {
+						return err
+					}
+					if set {
+						fmt.Printf("\nDeadline:\t%d\n", dlIdx)
+						fmt.Printf("Partition:\t%d\n", partIdx)
+
+						checkIn := func(name string, bg func() (bitfield.BitField, error)) error {
+							bf, err := bg()
+							if err != nil {
+								return err
+							}
+
+							set, err := bf.IsSet(id)
+							if err != nil {
+								return err
+							}
+							setstr := "no"
+							if set {
+								setstr = "yes"
+							}
+							fmt.Printf("%s: \t%s\n", name, setstr)
+							return nil
+						}
+
+						if err := checkIn("Unproven", part.UnprovenSectors); err != nil {
+							return err
+						}
+						if err := checkIn("Live", part.LiveSectors); err != nil {
+							return err
+						}
+						if err := checkIn("Active", part.ActiveSectors); err != nil {
+							return err
+						}
+						if err := checkIn("Faulty", part.FaultySectors); err != nil {
+							return err
+						}
+						if err := checkIn("Recovering", part.RecoveringSectors); err != nil {
+							return err
+						}
+
+						return errFound
+					}
+
+					return nil
+				})
+			}); err != errFound {
+				if err != nil {
+					return err
+				}
+
+				fmt.Println("\nNot found in any partition")
+			}
+		}
+
 		if cctx.Bool("log") {
 			fmt.Printf("--------\nEvent Log:\n")
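Aside (wording mine, not from the patch): the partition lookup above stops the deadline/partition walk early by returning a sentinel error, then treats that error as success at the call site. A generic, self-contained sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

var errFound = errors.New("found")

// walk calls cb for every item and stops at the first non-nil error,
// mirroring the ForEachDeadline/ForEachPartition helpers used above.
func walk(items []int, cb func(int) error) error {
	for _, it := range items {
		if err := cb(it); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := walk([]int{1, 2, 3, 4}, func(n int) error {
		if n == 3 {
			fmt.Println("found", n)
			return errFound // sentinel: stop walking, but it is not a failure
		}
		return nil
	})
	if err != nil && err != errFound {
		panic(err) // only real errors reach here
	}
}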
@@ -287,112 +389,132 @@ var sectorsListCmd = &cli.Command{
 				continue
 			}
 
-			if showRemoved || st.State != api.SectorState(sealing.Removed) {
-				_, inSSet := commitedIDs[s]
-				_, inASet := activeIDs[s]
-
-				dw, vp := .0, .0
-				if st.Expiration-st.Activation > 0 {
-					rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
-					dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
-					vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(9)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
-				}
-
-				var deals int
-				for _, deal := range st.Deals {
-					if deal != 0 {
-						deals++
-					}
-				}
-
-				exp := st.Expiration
-				if st.OnTime > 0 && st.OnTime < exp {
-					exp = st.OnTime // Can be different when the sector was CC upgraded
-				}
-
-				m := map[string]interface{}{
-					"ID": s,
-					"State": color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State),
-					"OnChain": yesno(inSSet),
-					"Active": yesno(inASet),
-				}
-
-				if deals > 0 {
-					m["Deals"] = color.GreenString("%d", deals)
-				} else {
-					m["Deals"] = color.BlueString("CC")
-					if st.ToUpgrade {
-						m["Deals"] = color.CyanString("CC(upgrade)")
-					}
-				}
-
-				if !fast {
-					if !inSSet {
-						m["Expiration"] = "n/a"
-					} else {
-						m["Expiration"] = lcli.EpochTime(head.Height(), exp)
-
-						if !fast && deals > 0 {
-							m["DealWeight"] = units.BytesSize(dw)
-							if vp > 0 {
-								m["VerifiedPower"] = color.GreenString(units.BytesSize(vp))
-							}
-						}
-
-						if st.Early > 0 {
-							m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
-						}
-					}
-				}
-
-				if cctx.Bool("events") {
-					var events int
-					for _, sectorLog := range st.Log {
-						if !strings.HasPrefix(sectorLog.Kind, "event") {
-							continue
-						}
-						if sectorLog.Kind == "event;sealing.SectorRestart" {
-							continue
-						}
-						events++
-					}
-
-					pieces := len(st.Deals)
-
-					switch {
-					case events < 12+pieces:
-						m["Events"] = color.GreenString("%d", events)
-					case events < 20+pieces:
-						m["Events"] = color.YellowString("%d", events)
-					default:
-						m["Events"] = color.RedString("%d", events)
-					}
-				}
-
-				if cctx.Bool("seal-time") && len(st.Log) > 1 {
-					start := time.Unix(int64(st.Log[0].Timestamp), 0)
-
-					for _, sectorLog := range st.Log {
-						if sectorLog.Kind == "event;sealing.SectorProving" {
-							end := time.Unix(int64(sectorLog.Timestamp), 0)
-							dur := end.Sub(start)
-
-							switch {
-							case dur < 12*time.Hour:
-								m["SealTime"] = color.GreenString("%s", dur)
-							case dur < 24*time.Hour:
-								m["SealTime"] = color.YellowString("%s", dur)
-							default:
-								m["SealTime"] = color.RedString("%s", dur)
-							}
-
-							break
-						}
-					}
-				}
-
-				tw.Write(m)
-			}
+			if !showRemoved && st.State == api.SectorState(sealing.Removed) {
+				continue
+			}
+
+			_, inSSet := commitedIDs[s]
+			_, inASet := activeIDs[s]
+
+			const verifiedPowerGainMul = 9
+
+			dw, vp := .0, .0
+			estimate := st.Expiration-st.Activation <= 0
+			if !estimate {
+				rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
+				dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
+				vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
+			} else {
+				for _, piece := range st.Pieces {
+					if piece.DealInfo != nil {
+						dw += float64(piece.Piece.Size)
+						if piece.DealInfo.DealProposal != nil && piece.DealInfo.DealProposal.VerifiedDeal {
+							vp += float64(piece.Piece.Size) * verifiedPowerGainMul
+						}
+					}
+				}
+			}
+
+			var deals int
+			for _, deal := range st.Deals {
+				if deal != 0 {
+					deals++
+				}
+			}
+
+			exp := st.Expiration
+			if st.OnTime > 0 && st.OnTime < exp {
+				exp = st.OnTime // Can be different when the sector was CC upgraded
+			}
+
+			m := map[string]interface{}{
+				"ID": s,
+				"State": color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State),
+				"OnChain": yesno(inSSet),
+				"Active": yesno(inASet),
+			}
+
+			if deals > 0 {
+				m["Deals"] = color.GreenString("%d", deals)
+			} else {
+				m["Deals"] = color.BlueString("CC")
+				if st.ToUpgrade {
+					m["Deals"] = color.CyanString("CC(upgrade)")
+				}
+			}
+
+			if !fast {
+				if !inSSet {
+					m["Expiration"] = "n/a"
+				} else {
+					m["Expiration"] = lcli.EpochTime(head.Height(), exp)
+					if st.Early > 0 {
+						m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
+					}
+				}
+			}
+
+			if !fast && deals > 0 {
+				estWrap := func(s string) string {
+					if !estimate {
+						return s
+					}
+					return fmt.Sprintf("[%s]", s)
+				}
+
+				m["DealWeight"] = estWrap(units.BytesSize(dw))
+				if vp > 0 {
+					m["VerifiedPower"] = estWrap(color.GreenString(units.BytesSize(vp)))
+				}
+			}
+
+			if cctx.Bool("events") {
+				var events int
+				for _, sectorLog := range st.Log {
+					if !strings.HasPrefix(sectorLog.Kind, "event") {
+						continue
+					}
+					if sectorLog.Kind == "event;sealing.SectorRestart" {
+						continue
+					}
+					events++
+				}
+
+				pieces := len(st.Deals)
+
+				switch {
+				case events < 12+pieces:
+					m["Events"] = color.GreenString("%d", events)
+				case events < 20+pieces:
+					m["Events"] = color.YellowString("%d", events)
+				default:
+					m["Events"] = color.RedString("%d", events)
+				}
+			}
+
+			if cctx.Bool("seal-time") && len(st.Log) > 1 {
+				start := time.Unix(int64(st.Log[0].Timestamp), 0)
+
+				for _, sectorLog := range st.Log {
+					if sectorLog.Kind == "event;sealing.SectorProving" { // todo: figure out a good way to not hardcode
+						end := time.Unix(int64(sectorLog.Timestamp), 0)
+						dur := end.Sub(start)
+
+						switch {
+						case dur < 12*time.Hour:
+							m["SealTime"] = color.GreenString("%s", dur)
+						case dur < 24*time.Hour:
+							m["SealTime"] = color.YellowString("%s", dur)
+						default:
+							m["SealTime"] = color.RedString("%s", dur)
+						}
+
+						break
+					}
+				}
+			}
+
+			tw.Write(m)
 		}
 
 		return tw.Flush(os.Stdout)
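One note on reading the new list output (wording mine, not from the patch): when a sector is not yet activated on chain, Expiration-Activation is not positive, so DealWeight and VerifiedPower are estimated from the piece sizes and wrapped in square brackets by estWrap. A trivial standalone sketch of that display convention:

package main

import "fmt"

// wrapIfEstimate brackets a value that is only an estimate, as estWrap does above.
func wrapIfEstimate(s string, estimate bool) string {
	if estimate {
		return "[" + s + "]"
	}
	return s
}

func main() {
	fmt.Println(wrapIfEstimate("32GiB", true))  // [32GiB] - estimated from pieces
	fmt.Println(wrapIfEstimate("32GiB", false)) // 32GiB   - from chain state
}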
@@ -1515,6 +1637,211 @@ var sectorsUpdateCmd = &cli.Command{
 	},
 }
 
+var sectorsExpiredCmd = &cli.Command{
+	Name: "expired",
+	Usage: "Get or cleanup expired sectors",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name: "show-removed",
+			Usage: "show removed sectors",
+		},
+		&cli.BoolFlag{
+			Name: "remove-expired",
+			Usage: "remove expired sectors",
+		},
+
+		&cli.Int64Flag{
+			Name: "confirm-remove-count",
+			Hidden: true,
+		},
+		&cli.Int64Flag{
+			Name: "expired-epoch",
+			Usage: "epoch at which to check sector expirations",
+			DefaultText: "WinningPoSt lookback epoch",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return xerrors.Errorf("getting fullnode api: %w", err)
+		}
+		defer nCloser()
+		ctx := lcli.ReqContext(cctx)
+
+		head, err := fullApi.ChainHead(ctx)
+		if err != nil {
+			return xerrors.Errorf("getting chain head: %w", err)
+		}
+
+		lbEpoch := abi.ChainEpoch(cctx.Int64("expired-epoch"))
+		if !cctx.IsSet("expired-epoch") {
+			nv, err := fullApi.StateNetworkVersion(ctx, head.Key())
+			if err != nil {
+				return xerrors.Errorf("getting network version: %w", err)
+			}
+
+			lbEpoch = head.Height() - policy.GetWinningPoStSectorSetLookback(nv)
+			if lbEpoch < 0 {
+				return xerrors.Errorf("too early to terminate sectors")
+			}
+		}
+
+		if cctx.IsSet("confirm-remove-count") && !cctx.IsSet("expired-epoch") {
+			return xerrors.Errorf("--expired-epoch must be specified with --confirm-remove-count")
+		}
+
+		lbts, err := fullApi.ChainGetTipSetByHeight(ctx, lbEpoch, head.Key())
+		if err != nil {
+			return xerrors.Errorf("getting lookback tipset: %w", err)
+		}
+
+		maddr, err := nodeApi.ActorAddress(ctx)
+		if err != nil {
+			return xerrors.Errorf("getting actor address: %w", err)
+		}
+
+		// toCheck is a working bitfield which will only contain terminated sectors
+		toCheck := bitfield.New()
+		{
+			sectors, err := nodeApi.SectorsList(ctx)
+			if err != nil {
+				return xerrors.Errorf("getting sector list: %w", err)
+			}
+
+			for _, sector := range sectors {
+				toCheck.Set(uint64(sector))
+			}
+		}
+
+		mact, err := fullApi.StateGetActor(ctx, maddr, lbts.Key())
+		if err != nil {
+			return err
+		}
+
+		tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory())
+		mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
+		if err != nil {
+			return err
+		}
+
+		alloc, err := mas.GetAllocatedSectors()
+		if err != nil {
+			return xerrors.Errorf("getting allocated sectors: %w", err)
+		}
+
+		// only allocated sectors can be expired,
+		toCheck, err = bitfield.IntersectBitField(toCheck, *alloc)
+		if err != nil {
+			return xerrors.Errorf("intersecting bitfields: %w", err)
+		}
+
+		if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
+			return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
+				live, err := part.LiveSectors()
+				if err != nil {
+					return err
+				}
+
+				toCheck, err = bitfield.SubtractBitField(toCheck, live)
+				if err != nil {
+					return err
+				}
+
+				unproven, err := part.UnprovenSectors()
+				if err != nil {
+					return err
+				}
+
+				toCheck, err = bitfield.SubtractBitField(toCheck, unproven)
+
+				return err
+			})
+		}); err != nil {
+			return err
+		}
+
+		err = mas.ForEachPrecommittedSector(func(pci miner.SectorPreCommitOnChainInfo) error {
+			toCheck.Unset(uint64(pci.Info.SectorNumber))
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+
+		if cctx.Bool("remove-expired") {
+			color.Red("Removing sectors:\n")
+		}
+
+		// toCheck now only contains sectors which either failed to precommit or are expired/terminated
+		fmt.Printf("Sector\tState\tExpiration\n")
+
+		var toRemove []abi.SectorNumber
+
+		err = toCheck.ForEach(func(u uint64) error {
+			s := abi.SectorNumber(u)
+
+			st, err := nodeApi.SectorsStatus(ctx, s, true)
+			if err != nil {
+				fmt.Printf("%d:\tError getting status: %s\n", u, err)
+				return nil
+			}
+
+			rmMsg := ""
+
+			if st.State == api.SectorState(sealing.Removed) {
+				if cctx.IsSet("confirm-remove-count") || !cctx.Bool("show-removed") {
+					return nil
+				}
+			} else { // not removed
+				toRemove = append(toRemove, s)
+			}
+
+			fmt.Printf("%d%s\t%s\t%s\n", s, rmMsg, st.State, lcli.EpochTime(head.Height(), st.Expiration))
+
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+
+		if cctx.Bool("remove-expired") {
+			if !cctx.IsSet("confirm-remove-count") {
+				fmt.Println()
+				fmt.Println(color.YellowString("All"), color.GreenString("%d", len(toRemove)), color.YellowString("sectors listed above will be removed\n"))
+				fmt.Println(color.YellowString("To confirm removal of the above sectors, including\n all related sealed and unsealed data, run:\n"))
+				fmt.Println(color.RedString("lotus-miner sectors expired --remove-expired --confirm-remove-count=%d --expired-epoch=%d\n", len(toRemove), lbts.Height()))
+				fmt.Println(color.YellowString("WARNING: This operation is irreversible"))
+				return nil
+			}
+
+			fmt.Println()
+
+			if int64(len(toRemove)) != cctx.Int64("confirm-remove-count") {
+				return xerrors.Errorf("value of confirm-remove-count doesn't match the number of sectors which can be removed (%d)", len(toRemove))
+			}
+
+			for _, number := range toRemove {
+				fmt.Printf("Removing sector\t%s:\t", color.YellowString("%d", number))
+
+				err := nodeApi.SectorRemove(ctx, number)
+				if err != nil {
+					color.Red("ERROR: %s\n", err.Error())
+				} else {
+					color.Green("OK\n")
+				}
+			}
+		}
+
+		return nil
+	},
+}
+
 var sectorsBatching = &cli.Command{
 	Name: "batching",
 	Usage: "manage batch sector operations",
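Usage sketch (flow inferred from the confirmation text printed above; counts and epochs are placeholders):

	lotus-miner sectors expired                               # list expired/terminated sectors
	lotus-miner sectors expired --show-removed                # include already-removed sectors
	lotus-miner sectors expired --remove-expired              # dry run: prints the exact confirmation command
	lotus-miner sectors expired --remove-expired --confirm-remove-count=2 --expired-epoch=123456

The last form only proceeds when the count and epoch match what the dry run reported, and the removal is irreversible.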
@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 
 	"github.com/filecoin-project/lotus/chain/gen/genesis"
 
@@ -510,13 +511,16 @@ var chainBalanceStateCmd = &cli.Command{
 			return err
 		}
 
-		cs := store.NewChainStore(bs, bs, mds, nil)
+		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck
 
 		cst := cbor.NewCborStore(bs)
 		store := adt.WrapStore(ctx, cst)
 
-		sm := stmgr.NewStateManager(cs, vm.Syscalls(ffiwrapper.ProofVerifier))
+		sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule())
+		if err != nil {
+			return err
+		}
 
 		tree, err := state.LoadStateTree(cst, sroot)
 		if err != nil {
@@ -731,14 +735,16 @@ var chainPledgeCmd = &cli.Command{
 			return err
 		}
 
-		cs := store.NewChainStore(bs, bs, mds, nil)
+		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck
 
 		cst := cbor.NewCborStore(bs)
 		store := adt.WrapStore(ctx, cst)
 
-		sm := stmgr.NewStateManager(cs, vm.Syscalls(ffiwrapper.ProofVerifier))
+		sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule())
+		if err != nil {
+			return err
+		}
 		state, err := state.LoadStateTree(cst, sroot)
 		if err != nil {
 			return err
@@ -90,7 +90,7 @@ var exportChainCmd = &cli.Command{
 			return err
 		}
 
-		cs := store.NewChainStore(bs, bs, mds, nil)
+		cs := store.NewChainStore(bs, bs, mds, nil, nil)
 		defer cs.Close() //nolint:errcheck
 
 		if err := cs.Load(); err != nil {
@@ -9,6 +9,7 @@ import (
 	_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
 
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 
 	"github.com/fatih/color"
 	"github.com/ipfs/go-datastore"
@@ -54,7 +55,7 @@ var genesisVerifyCmd = &cli.Command{
 		}
 		bs := blockstore.FromDatastore(datastore.NewMapDatastore())
 
-		cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil)
+		cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck
 
 		cf := cctx.Args().Get(0)
@@ -7,6 +7,7 @@ import (
 	"math/big"
 
 	big2 "github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -74,7 +75,7 @@ var minerTypesCmd = &cli.Command{
 			return err
 		}
 
-		cs := store.NewChainStore(bs, bs, mds, nil)
+		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck
 
 		cst := cbor.NewCborStore(bs)
@@ -8,7 +8,6 @@ import (
 	"fmt"
 
 	"github.com/fatih/color"
-
 	"github.com/ipfs/go-cid"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"
@@ -16,7 +15,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/big"
 
-	"github.com/filecoin-project/lotus/chain/stmgr"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
@@ -141,7 +140,7 @@ func printMessage(cctx *cli.Context, msg *types.Message) error {
 		return nil
 	}
 
-	fmt.Println("Method:", stmgr.MethodsMap[toact.Code][msg.Method].Name)
+	fmt.Println("Method:", filcns.NewActorRegistry().Methods[toact.Code][msg.Method].Name) // todo use remote
 	p, err := lcli.JsonParams(toact.Code, msg.Method, msg.Params)
 	if err != nil {
 		return err
@@ -6,6 +6,7 @@ import (
 	"io"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/ipfs/bbloom"
 	"github.com/ipfs/go-cid"
 	"github.com/urfave/cli/v2"
@@ -167,7 +168,7 @@ var stateTreePruneCmd = &cli.Command{
 			return nil
 		}
 
-		cs := store.NewChainStore(bs, bs, mds, nil)
+		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck
 
 		if err := cs.Load(); err != nil {
@@ -28,7 +28,8 @@ var verifRegCmd = &cli.Command{
 	Usage: "Interact with the verified registry actor",
 	Flags: []cli.Flag{},
 	Subcommands: []*cli.Command{
-		verifRegAddVerifierCmd,
+		verifRegAddVerifierFromMsigCmd,
+		verifRegAddVerifierFromAccountCmd,
 		verifRegVerifyClientCmd,
 		verifRegListVerifiersCmd,
 		verifRegListClientsCmd,
@@ -37,7 +38,7 @@ var verifRegCmd = &cli.Command{
 	},
 }
 
-var verifRegAddVerifierCmd = &cli.Command{
+var verifRegAddVerifierFromMsigCmd = &cli.Command{
 	Name: "add-verifier",
 	Usage: "make a given account a verifier",
 	ArgsUsage: "<message sender> <new verifier> <allowance>",
@@ -110,6 +111,71 @@ var verifRegAddVerifierCmd = &cli.Command{
 	},
 }
 
+var verifRegAddVerifierFromAccountCmd = &cli.Command{
+	Name: "add-verifier-from-account",
+	Usage: "make a given account a verifier",
+	ArgsUsage: "<verifier root key> <new verifier> <allowance>",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 3 {
+			return fmt.Errorf("must specify three arguments: sender, verifier, and allowance")
+		}
+
+		sender, err := address.NewFromString(cctx.Args().Get(0))
+		if err != nil {
+			return err
+		}
+
+		verifier, err := address.NewFromString(cctx.Args().Get(1))
+		if err != nil {
+			return err
+		}
+
+		allowance, err := types.BigFromString(cctx.Args().Get(2))
+		if err != nil {
+			return err
+		}
+
+		// TODO: ActorUpgrade: Abstract
+		params, err := actors.SerializeParams(&verifreg2.AddVerifierParams{Address: verifier, Allowance: allowance})
+		if err != nil {
+			return err
+		}
+
+		api, closer, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		msg := &types.Message{
+			To: verifreg.Address,
+			From: sender,
+			Method: verifreg.Methods.AddVerifier,
+			Params: params,
+		}
+
+		smsg, err := api.MpoolPushMessage(ctx, msg, nil)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("message sent, now waiting on cid: %s\n", smsg.Cid())
+
+		mwait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+		if err != nil {
+			return err
+		}
+
+		if mwait.Receipt.ExitCode != 0 {
+			return fmt.Errorf("failed to add verified client: %d", mwait.Receipt.ExitCode)
+		}
+
+		return nil
+
+	},
+}
+
 var verifRegVerifyClientCmd = &cli.Command{
 	Name: "verify-client",
 	Usage: "make a given account a verified client",
@@ -4,6 +4,7 @@ import (
 	"context"
 	"math"
 
+	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"go.uber.org/zap"
 	"golang.org/x/xerrors"
 
@@ -83,6 +84,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S
 		Epoch: parentTs.Height() + 1,
 		Rand: r,
 		Bstore: sm.ChainStore().StateBlockstore(),
+		Actors: filcns.NewActorRegistry(),
 		Syscalls: sm.VMSys(),
 		CircSupplyCalc: sm.GetVMCirculatingSupply,
 		NtwkVersion: sm.GetNtwkVersion,
Some files were not shown because too many files have changed in this diff.