Merge branch 'master' into libp2p-pubsub-tracer
commit df17ea06f5
@ -92,6 +92,9 @@ jobs:
|
||||
- run: sudo apt-get install npm
|
||||
- run:
|
||||
command: make buildall
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- store_artifacts:
|
||||
path: lotus
|
||||
- store_artifacts:
|
||||
@ -282,21 +285,6 @@ jobs:
|
||||
root: "."
|
||||
paths:
|
||||
- linux-butterflynet
|
||||
build-ntwk-nerpa:
|
||||
description: |
|
||||
Compile lotus binaries for the nerpa network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make nerpanet
|
||||
- run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
paths:
|
||||
- linux-nerpanet
|
||||
build-lotus-soup:
|
||||
description: |
|
||||
Compile `lotus-soup` Testground test plan
|
||||
@ -330,7 +318,7 @@ jobs:
|
||||
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
|
||||
- run:
|
||||
name: "prepare testground home dir and link test plans"
|
||||
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
|
||||
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup
|
||||
- run:
|
||||
name: "go get lotus@master"
|
||||
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
|
||||
@ -340,15 +328,11 @@ jobs:
|
||||
- run:
|
||||
name: "trigger payment channel stress testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
- run:
|
||||
name: "trigger graphsync testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
|
||||
|
||||
build-macos:
|
||||
description: build darwin lotus binary
|
||||
macos:
|
||||
xcode: "10.0.0"
|
||||
xcode: "12.5.0"
|
||||
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||
steps:
|
||||
- prepare:
|
||||
@ -367,11 +351,6 @@ jobs:
|
||||
name: Install Rust
|
||||
command: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y
|
||||
- run:
|
||||
name: Install jq
|
||||
command: |
|
||||
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
|
||||
chmod +x /usr/local/bin/jq
|
||||
- run:
|
||||
name: Install hwloc
|
||||
command: |
|
||||
@ -388,6 +367,9 @@ jobs:
|
||||
- run:
|
||||
command: make build
|
||||
no_output_timeout: 30m
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- store_artifacts:
|
||||
path: lotus
|
||||
- store_artifacts:
|
||||
@ -723,18 +705,6 @@ jobs:
|
||||
- packer/build:
|
||||
template: tools/packer/lotus.pkr.hcl
|
||||
args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
|
||||
publish-packer-nerpanet:
|
||||
description: build and push AWS IAM and DigitalOcean droplet.
|
||||
executor:
|
||||
name: packer/default
|
||||
packer-version: 1.6.6
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: "."
|
||||
- packer/build:
|
||||
template: tools/packer/lotus.pkr.hcl
|
||||
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
|
||||
publish-dockerhub:
|
||||
description: publish to dockerhub
|
||||
machine:
|
||||
@ -810,11 +780,21 @@ workflows:
|
||||
suite: itest-deadlines
|
||||
target: "./itests/deadlines_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_512mb
|
||||
suite: itest-deals_512mb
|
||||
target: "./itests/deals_512mb_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_concurrent
|
||||
suite: itest-deals_concurrent
|
||||
target: "./itests/deals_concurrent_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_max_staging_deals
|
||||
suite: itest-deals_max_staging_deals
|
||||
target: "./itests/deals_max_staging_deals_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_offline
|
||||
suite: itest-deals_offline
|
||||
@ -825,6 +805,11 @@ workflows:
|
||||
suite: itest-deals_padding
|
||||
target: "./itests/deals_padding_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_partial_retrieval
|
||||
suite: itest-deals_partial_retrieval
|
||||
target: "./itests/deals_partial_retrieval_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_power
|
||||
suite: itest-deals_power
|
||||
@ -840,6 +825,11 @@ workflows:
|
||||
suite: itest-deals_publish
|
||||
target: "./itests/deals_publish_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals_retry_deal_no_funds
|
||||
suite: itest-deals_retry_deal_no_funds
|
||||
target: "./itests/deals_retry_deal_no_funds_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-deals
|
||||
suite: itest-deals
|
||||
@ -972,11 +962,6 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-ntwk-nerpa:
|
||||
filters:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-lotus-soup
|
||||
- build-macos:
|
||||
filters:
|
||||
@ -1041,16 +1026,6 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish-packer-nerpanet:
|
||||
requires:
|
||||
- build-ntwk-nerpa
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish-snapcraft:
|
||||
name: publish-snapcraft-stable
|
||||
channel: stable
|
||||
|
@ -92,6 +92,9 @@ jobs:
|
||||
- run: sudo apt-get install npm
|
||||
- run:
|
||||
command: make buildall
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- store_artifacts:
|
||||
path: lotus
|
||||
- store_artifacts:
|
||||
@ -282,21 +285,6 @@ jobs:
|
||||
root: "."
|
||||
paths:
|
||||
- linux-butterflynet
|
||||
build-ntwk-nerpa:
|
||||
description: |
|
||||
Compile lotus binaries for the nerpa network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make nerpanet
|
||||
- run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
paths:
|
||||
- linux-nerpanet
|
||||
build-lotus-soup:
|
||||
description: |
|
||||
Compile `lotus-soup` Testground test plan
|
||||
@ -330,7 +318,7 @@ jobs:
|
||||
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
|
||||
- run:
|
||||
name: "prepare testground home dir and link test plans"
|
||||
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
|
||||
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup
|
||||
- run:
|
||||
name: "go get lotus@master"
|
||||
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
|
||||
@ -340,15 +328,11 @@ jobs:
|
||||
- run:
|
||||
name: "trigger payment channel stress testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
- run:
|
||||
name: "trigger graphsync testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
|
||||
|
||||
build-macos:
|
||||
description: build darwin lotus binary
|
||||
macos:
|
||||
xcode: "10.0.0"
|
||||
xcode: "12.5.0"
|
||||
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||
steps:
|
||||
- prepare:
|
||||
@ -367,11 +351,6 @@ jobs:
|
||||
name: Install Rust
|
||||
command: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y
|
||||
- run:
|
||||
name: Install jq
|
||||
command: |
|
||||
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
|
||||
chmod +x /usr/local/bin/jq
|
||||
- run:
|
||||
name: Install hwloc
|
||||
command: |
|
||||
@ -388,6 +367,9 @@ jobs:
|
||||
- run:
|
||||
command: make build
|
||||
no_output_timeout: 30m
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- store_artifacts:
|
||||
path: lotus
|
||||
- store_artifacts:
|
||||
@ -723,18 +705,6 @@ jobs:
|
||||
- packer/build:
|
||||
template: tools/packer/lotus.pkr.hcl
|
||||
args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
|
||||
publish-packer-nerpanet:
|
||||
description: build and push AWS IAM and DigitalOcean droplet.
|
||||
executor:
|
||||
name: packer/default
|
||||
packer-version: 1.6.6
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: "."
|
||||
- packer/build:
|
||||
template: tools/packer/lotus.pkr.hcl
|
||||
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
|
||||
publish-dockerhub:
|
||||
description: publish to dockerhub
|
||||
machine:
|
||||
@ -837,11 +807,6 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-ntwk-nerpa:
|
||||
filters:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-lotus-soup
|
||||
- build-macos:
|
||||
filters:
|
||||
@ -906,16 +871,6 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish-packer-nerpanet:
|
||||
requires:
|
||||
- build-ntwk-nerpa
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish-snapcraft:
|
||||
name: publish-snapcraft-stable
|
||||
channel: stable
|
||||
|
32  .codecov.yml
@ -1,9 +1,25 @@
|
||||
ignore:
|
||||
# Auto generated
|
||||
- "^.*_gen.go$"
|
||||
- "^.*/mock_full.go$"
|
||||
# Old actors.
|
||||
- "^chain/actors/builtin/[^/]*/(message|state|v)[0-4]\\.go$" # We test the latest version only.
|
||||
# Tests
|
||||
- "api/test/**"
|
||||
- "conformance/**"
|
||||
# Generators
|
||||
- "gen/**"
|
||||
- "chain/actors/agen/**"
|
||||
# Non-critical utilities
|
||||
- "api/docgen/**"
|
||||
- "api/docgen-openrpc/**"
|
||||
coverage:
|
||||
status:
|
||||
patch: off
|
||||
project:
|
||||
tools-and-tests:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1%
|
||||
informational: true
|
||||
paths:
|
||||
- "testplans"
|
||||
@ -17,27 +33,27 @@ coverage:
|
||||
- "build"
|
||||
markets:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1%
|
||||
informational: false
|
||||
paths:
|
||||
- "markets"
|
||||
- "paychmgr"
|
||||
miner:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1.5%
|
||||
informational: false
|
||||
paths:
|
||||
- "miner"
|
||||
- "storage"
|
||||
chain:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1%
|
||||
informational: false
|
||||
paths:
|
||||
- "chain"
|
||||
node:
|
||||
node:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1%
|
||||
informational: false
|
||||
paths:
|
||||
- "node"
|
||||
@ -50,8 +66,8 @@ coverage:
|
||||
- "journal"
|
||||
cli:
|
||||
target: auto
|
||||
threshold: 0.5%
|
||||
threshold: 1%
|
||||
informational: true
|
||||
paths:
|
||||
- "cli"
|
||||
- "cmd"
|
||||
- "cmd"
|
||||
|
14  .github/workflows/sync-master-main.yaml (new file)
@ -0,0 +1,14 @@
|
||||
name: sync-master-main
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
jobs:
|
||||
sync:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: update remote branch main
|
||||
run: |
|
||||
# overrides the remote branch (origin:github) `main`
|
||||
git push origin --force master:main
|
424  CHANGELOG.md
@ -1,5 +1,429 @@
|
||||
# Lotus changelog
|
||||
|
||||
# v1.13.0 / 2021-10-18
|
||||
|
||||
Lotus v1.13.0 is a *highly recommended* feature release for all lotus users (i.e. storage providers, data brokers, application developers and so on) that supports the upcoming
|
||||
[Network v14 Chocolate upgrade](https://github.com/filecoin-project/lotus/discussions/7431).
|
||||
This feature release includes the latest functionalities and improvements, like data transfer rate-limiting for both storage and retrieval deals, proof v10 with CUDA support, etc. You can find more details in the Changelog below.
|
||||
|
||||
## Highlights
|
||||
- Enable separate storage and retrieval transfer limits ([filecoin-project/lotus#7405](https://github.com/filecoin-project/lotus/pull/7405))
|
||||
- `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, so users can separately set the number of parallel ongoing data transfers for storage and retrieval deals. The default value for both is set to 20.
|
||||
- If you are using the lotus client, these two configuration variables are under the `Client` section in `./lotus/config.toml`.
|
||||
- If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `/.lotusminer/config.toml` (a hedged sketch follows this list).
|
||||
- Update proofs to v10.0.0 ([filecoin-project/lotus#7420](https://github.com/filecoin-project/lotus/pull/7420))
|
||||
- This version supports CUDA. To enable CUDA instead of openCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`.
|
||||
- You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and perf improvements result on PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443), most people observe a 30-50% decrease in computation time.
|
||||
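
A minimal `config.toml` sketch of the new transfer limits described in the first highlight above. The section and key names follow these notes; the file paths and surrounding keys are illustrative assumptions, not authoritative defaults:

```toml
# Hypothetical excerpt from the lotus client node config (~/.lotus/config.toml)
[Client]
  # max parallel ongoing data transfers for storage and retrieval deals
  SimultaneousTransfersForStorage = 20
  SimultaneousTransfersForRetrieval = 20

# Hypothetical excerpt from the markets/miner node config (~/.lotusminer/config.toml)
[Dealmaking]
  SimultaneousTransfersForStorage = 20
  SimultaneousTransfersForRetrieval = 20
```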
|
||||
## New Features
|
||||
- Feat/datamodel selector retrieval ([filecoin-project/lotus#6393](https://github.com/filecoin-project/lotus/pull/6393))
|
||||
- This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal provided the user knows the exact low-level shape of the deal contents.
|
||||
- For example, to retrieve the first entry of a UnixFS directory, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output`
|
||||
- Expose storage stats on the metrics endpoint ([filecoin-project/lotus#7418](https://github.com/filecoin-project/lotus/pull/7418))
|
||||
- feat: Catch panic to generate report and reraise ([filecoin-project/lotus#7341](https://github.com/filecoin-project/lotus/pull/7341))
|
||||
- Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon, miner or workers.
|
||||
- Add envconfig docs to the config ([filecoin-project/lotus#7412](https://github.com/filecoin-project/lotus/pull/7412))
|
||||
- You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml).
|
||||
- lotus shed: fr32 utils ([filecoin-project/lotus#7355](https://github.com/filecoin-project/lotus/pull/7355))
|
||||
- Miner CLI: Allow trying to change owners of any miner actor ([filecoin-project/lotus#7328](https://github.com/filecoin-project/lotus/pull/7328))
|
||||
- Add --unproven flag to the sectors list command ([filecoin-project/lotus#7308](https://github.com/filecoin-project/lotus/pull/7308))
|
||||
|
||||
## Improvements
|
||||
- check for deal start epoch on SectorAddPieceToAny ([filecoin-project/lotus#7407](https://github.com/filecoin-project/lotus/pull/7407))
|
||||
- Verify Voucher locks in VoucherValidUnlocked ([filecoin-project/lotus#5609](https://github.com/filecoin-project/lotus/pull/5609))
|
||||
- Add more info to miner allinfo command ([filecoin-project/lotus#7384](https://github.com/filecoin-project/lotus/pull/7384))
|
||||
- add `lotus-miner storage-deals list --format=json` with transfers ([filecoin-project/lotus#7312](https://github.com/filecoin-project/lotus/pull/7312))
|
||||
- Fix formatting ([filecoin-project/lotus#7383](https://github.com/filecoin-project/lotus/pull/7383))
|
||||
- GetCurrentDealInfo err: handle correctly err case ([filecoin-project/lotus#7346](https://github.com/filecoin-project/lotus/pull/7346))
|
||||
- fix: Enforce verification key integrity check regardless of TRUST_PARAMS=1 ([filecoin-project/lotus#7327](https://github.com/filecoin-project/lotus/pull/7327))
|
||||
- Show more deal states in miner info ([filecoin-project/lotus#7311](https://github.com/filecoin-project/lotus/pull/7311))
|
||||
- Prep retrieval for selectors: no functional changes ([filecoin-project/lotus#7306](https://github.com/filecoin-project/lotus/pull/7306))
|
||||
- Seed: improve helptext ([filecoin-project/lotus#7304](https://github.com/filecoin-project/lotus/pull/7304))
|
||||
- Mempool: reduce size of sigValCache ([filecoin-project/lotus#7305](https://github.com/filecoin-project/lotus/pull/7305))
|
||||
- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474))
|
||||
|
||||
## Bug Fixes
|
||||
- StateSearchMsg: Correct usage of the allowReplaced flag ([filecoin-project/lotus#7450](https://github.com/filecoin-project/lotus/pull/7450))
|
||||
- fix staging area path buildup ([filecoin-project/lotus#7363](https://github.com/filecoin-project/lotus/pull/7363))
|
||||
- storagemgr: Cleanup workerLk around worker resources ([filecoin-project/lotus#7334](https://github.com/filecoin-project/lotus/pull/7334))
|
||||
- fix: check padSector Cid ([filecoin-project/lotus#7310](https://github.com/filecoin-project/lotus/pull/7310))
|
||||
- sealing: Recover sectors after failed AddPiece ([filecoin-project/lotus#7492](https://github.com/filecoin-project/lotus/pull/7492))
|
||||
- fix: support node instantiation in external packages ([filecoin-project/lotus#7511](https://github.com/filecoin-project/lotus/pull/7511))
|
||||
- Chore/backport cleanup withdrawn dependency ([filecoin-project/lotus#7482](https://github.com/filecoin-project/lotus/pull/7482))
|
||||
|
||||
## Dependency Updates
|
||||
- github.com/filecoin-project/go-data-transfer (v1.10.1 -> v1.11.1):
|
||||
- github.com/filecoin-project/go-fil-markets (v1.12.0 -> v1.13.1):
|
||||
- github.com/filecoin-project/go-paramfetch (v0.0.2-0.20210614165157-25a6c7769498 -> v0.0.2):
|
||||
- update go-libp2p to v0.15.0 ([filecoin-project/lotus#7362](https://github.com/filecoin-project/lotus/pull/7362))
|
||||
- update to go-graphsync v0.10.1 ([filecoin-project/lotus#7359](https://github.com/filecoin-project/lotus/pull/7359))
|
||||
|
||||
## Others
|
||||
- Chocolate to master ([filecoin-project/lotus#7440](https://github.com/filecoin-project/lotus/pull/7440))
|
||||
- releases -> master ([filecoin-project/lotus#7403](https://github.com/filecoin-project/lotus/pull/7403))
|
||||
- remove nerpanet related code ([filecoin-project/lotus#7373](https://github.com/filecoin-project/lotus/pull/7373))
|
||||
- sync branch main with master on updates ([filecoin-project/lotus#7366](https://github.com/filecoin-project/lotus/pull/7366))
|
||||
- remove job to install jq ([filecoin-project/lotus#7309](https://github.com/filecoin-project/lotus/pull/7309))
|
||||
- restore filters for the build-macos job ([filecoin-project/lotus#7455](https://github.com/filecoin-project/lotus/pull/7455))
|
||||
- v1.13.0-rc2 ([filecoin-project/lotus#7458](https://github.com/filecoin-project/lotus/pull/7458))
|
||||
- v1.13.0-rc1 ([filecoin-project/lotus#7452](https://github.com/filecoin-project/lotus/pull/7452))
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @dirkmc | 8 | +845/-375 | 55 |
|
||||
| @magik6k | 10 | +1056/-60 | 26 |
|
||||
| @aarshkshah1992 | 6 | +813/-259 | 16 |
|
||||
| @arajasek | 10 | +552/-251 | 43 |
|
||||
| @ribasushi | 6 | +505/-78 | 22 |
|
||||
| @jennijuju | 7 | +212/-323 | 34 |
|
||||
| @nonsense | 10 | +335/-139 | 19 |
|
||||
| @dirkmc | 8 | +149/-55 | 16 |
|
||||
| @hannahhoward | 4 | +56/-32 | 17 |
|
||||
| @rvagg | 4 | +61/-13 | 9 |
|
||||
| @jennijuju | 2 | +0/-57 | 2 |
|
||||
| @hannahhoward | 1 | +33/-18 | 7 |
|
||||
| @Kubuxu | 8 | +27/-16 | 9 |
|
||||
| @coryschwartz | 1 | +16/-2 | 2 |
|
||||
| @travisperson | 1 | +14/-0 | 1 |
|
||||
| @frrist | 1 | +12/-0 | 2 |
|
||||
| @ognots | 1 | +0/-10 | 2 |
|
||||
| @lanzafame | 1 | +3/-3 | 1 |
|
||||
| @jennijuju | 1 | +2/-2 | 1 |
|
||||
| @swift-mx | 1 | +1/-1 | 1 |
|
||||
|
||||
# v1.12.0 / 2021-10-12
|
||||
|
||||
This is a mandatory release of Lotus that introduces [Filecoin Network v14](https://github.com/filecoin-project/community/discussions/74#discussioncomment-1398542), codenamed the Chocolate upgrade. The Filecoin mainnet will upgrade at epoch 1231620, on 2021-10-26T13:30:00Z.
|
||||
|
||||
The Chocolate upgrade introduces the following FIPs, delivered in [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0)
|
||||
|
||||
- [FIP-0020](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0020.md): Add return value to `WithdrawBalance`
|
||||
- [FIP-0021](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0021.md): Correct quality calculation on expiration
|
||||
- [FIP-0022](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0022.md): Bad deals don't fail PublishStorageDeals
|
||||
- [FIP-0023](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0023.md): Break ties between tipsets of equal weight
|
||||
- [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md): BatchBalancer & BatchDiscount Post-HyperDrive Adjustment
|
||||
- [FIP-0026](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0026.md): Extend sector faulty period from 2 weeks to 6 weeks
|
||||
|
||||
Note that this release is built on top of lotus v1.11.3. Enterprising users like storage providers, data brokers and others are recommended to use lotus v1.13.0 for the latest features, improvements and bug fixes.
|
||||
|
||||
## New Features and Changes
|
||||
- Implement and support [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md) BatchBalancer & BatchDiscount Post-HyperDrive Adjustment:
|
||||
- Precommit batch balancer support/config ([filecoin-project/lotus#7410](https://github.com/filecoin-project/lotus/pull/7410))
|
||||
- Set `BatchPreCommitAboveBaseFee` to decide whether to send out PreCommits in individual messages or in a batch (a hedged config sketch follows this list).
|
||||
- The default values of `BatchPreCommitAboveBaseFee` and `AggregateAboveBaseFee` are now updated to 0.32 nanoFIL.
|
||||
- The amount of FIL withdrawn from `WithdrawBalance` from miner or market via lotus CLI is now printed out upon message landing on the chain.
|
||||
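
A sketch of the lotus-miner config keys mentioned above. The placement under `[Sealing]` and the string encoding of the FIL value are assumptions based on recent default configs; treat this as illustrative, not as the exact generated default:

```toml
# Hypothetical lotus-miner config.toml excerpt
[Sealing]
  # send PreCommit messages as a batch only when the network base fee is above this value
  BatchPreCommitAboveBaseFee = "0.00000000032 FIL"   # 0.32 nanoFIL
  # analogous threshold for ProveCommit aggregation
  AggregateAboveBaseFee = "0.00000000032 FIL"        # 0.32 nanoFIL
```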
|
||||
## Improvements
|
||||
- Implement [FIP-0023](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0023.md) (Break ties between tipsets of equal weight)
|
||||
- ChainStore: Add a tiebreaker rule for tipsets of equal weight ([filecoin-project/lotus#7378](https://github.com/filecoin-project/lotus/pull/7378))
|
||||
- Randomness: Move getters from ChainAPI to StateAPI ([filecoin-project/lotus#7322](https://github.com/filecoin-project/lotus/pull/7322))
|
||||
|
||||
## Bug Fixes
|
||||
- Fix Drand fetching around null tipsets ([filecoin-project/lotus#7376](https://github.com/filecoin-project/lotus/pull/7376))
|
||||
|
||||
## Dependency Updates
|
||||
- Add [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0)
|
||||
- **Protocol changes**
|
||||
- Multisig Approve only hashes when hash in params
|
||||
- FIP 0020 WithdrawBalance methods return withdrawn value
|
||||
- FIP 0021 Fix bug in power calculation when extending verified deals sectors
|
||||
- FIP 0022 PublishStorageDeals drops errors in batch
|
||||
- FIP 0024 BatchBalancer update and burn added to PreCommitBatch
|
||||
- FIP 0026 Add FaultMaxAge extension
|
||||
- Reduce calls to power and reward actors by passing values from power cron
|
||||
- Defensive programming hardening power cron against programmer error
|
||||
- **Implementation changes**
|
||||
- Move to xerrors
|
||||
- Improved logging: burn events are now logged with reasons and burned value.
|
||||
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210810190654-139e0e79e69e -> v0.1.1-0.20210915140513-d354ccf10379):
|
||||
|
||||
## Others
|
||||
- v1.12.0-rc1 prep ([filecoin-project/lotus#7426](https://github.com/filecoin-project/lotus/pull/7426))
|
||||
- Extend FaultMaxAge to 6 weeks for actors v6 on test networks only ([filecoin-project/lotus#7421](https://github.com/filecoin-project/lotus/pull/7421))
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @ZenGround0 | 12 | +4202/-2752 | 187 |
|
||||
| @arajasek | 25 | +4567/-854 | 190 |
|
||||
| @laudiacay | 4 | +1276/-435 | 37 |
|
||||
| @laudiacay | 12 | +1350/-209 | 43 |
|
||||
| @magik6k | 1 | +171/-13 | 8 |
|
||||
| @Stebalien | 2 | +115/-12 | 6 |
|
||||
| @jennijuju | 7 | +73/-34 | 26 |
|
||||
| @travisperson | 2 | +19/-19 | 7 |
|
||||
| @coryschwartz | 1 | +16/-2 | 2 |
|
||||
| @Kubuxu | 5 | +5/-5 | 5 |
|
||||
| @ribasushi | 1 | +5/-3 | 1 |
|
||||
|
||||
# v1.11.3 / 2021-09-29
|
||||
|
||||
lotus v1.11.3 is a feature release that's **highly recommended for ALL lotus users to upgrade**, including node
operators, storage providers and clients. It includes many improvements and bug fixes that result in perf
improvements in different areas, like deal making, sealing and so on.
|
||||
|
||||
## Highlights
|
||||
|
||||
- 🌟🌟 Introduce `MaxStagingDealsBytes` - reject new deals if our staging deals area is full ([filecoin-project/lotus#7276](https://github.com/filecoin-project/lotus/pull/7276))
|
||||
- Set `MaxStagingDealsBytes` under the `[Dealmaking]` section of the markets subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of the market subsystem's repo gets too large (a hedged sketch follows this list).
|
||||
- 🌟🌟miner: Command to list/remove expired sectors locally ([filecoin-project/lotus#7140](https://github.com/filecoin-project/lotus/pull/7140))
|
||||
- run `./lotus-miner sectors expired -h` for more details.
|
||||
- 🚀update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
|
||||
- MinerX fellows (early testers of lotus releases) have reported faster WindowPoSt computation!
|
||||
- 🌟dealpublisher: Fully validate deals before publishing ([filecoin-project/lotus#7234](https://github.com/filecoin-project/lotus/pull/7234))
|
||||
- This excludes the expired deals before sending out a PSD message which reduces the chances of PSD message failure due to invalid deals.
|
||||
- 🌟Simple alert system; FD limit alerts ([filecoin-project/lotus#7108](https://github.com/filecoin-project/lotus/pull/7108))
|
||||
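
A minimal sketch of the staging-area limit highlighted above. The key name follows these notes; the value shown is an illustrative 100 GiB cap, and the assumption that `0` means "no limit" should be checked against your config docs:

```toml
# Hypothetical markets-node config.toml excerpt
[Dealmaking]
  # reject new incoming deals once the deal-staging directory exceeds this size, in bytes
  MaxStagingDealsBytes = 107374182400   # ~100 GiB; 0 is assumed to mean unlimited
```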
|
||||
## New Features
|
||||
|
||||
- feat(ci): include version/cli checks in tagged releases ([filecoin-project/lotus#7331](https://github.com/filecoin-project/lotus/pull/7331))
|
||||
- Show deal sizes in sealing sectors ([filecoin-project/lotus#7261](https://github.com/filecoin-project/lotus/pull/7261))
|
||||
- config for disabling NAT port mapping ([filecoin-project/lotus#7204](https://github.com/filecoin-project/lotus/pull/7204))
|
||||
- Add optional mined block list to miner info ([filecoin-project/lotus#7202](https://github.com/filecoin-project/lotus/pull/7202))
|
||||
- Shed: Create a verifreg command for when VRK isn't a multisig ([filecoin-project/lotus#7099](https://github.com/filecoin-project/lotus/pull/7099))
|
||||
|
||||
## Improvements
|
||||
|
||||
- build macOS CI ([filecoin-project/lotus#7307](https://github.com/filecoin-project/lotus/pull/7307))
|
||||
- itests: remove cid equality comparison ([filecoin-project/lotus#7292](https://github.com/filecoin-project/lotus/pull/7292))
|
||||
- Add partition info to the 'sectors status' command ([filecoin-project/lotus#7246](https://github.com/filecoin-project/lotus/pull/7246))
|
||||
- chain: Cleanup consensus logic ([filecoin-project/lotus#7255](https://github.com/filecoin-project/lotus/pull/7255))
|
||||
- builder: Handle chainstore config in ConfigFullNode ([filecoin-project/lotus#7232](https://github.com/filecoin-project/lotus/pull/7232))
|
||||
- gateway: check tipsets in ChainGetPath ([filecoin-project/lotus#7230](https://github.com/filecoin-project/lotus/pull/7230))
|
||||
- Refactor events subsystem ([filecoin-project/lotus#7000](https://github.com/filecoin-project/lotus/pull/7000))
|
||||
- test: re-enable disabled tests ([filecoin-project/lotus#7211](https://github.com/filecoin-project/lotus/pull/7211))
|
||||
- Reduce lotus-miner startup spam ([filecoin-project/lotus#7205](https://github.com/filecoin-project/lotus/pull/7205))
|
||||
- Catch deal slashed because sector was terminated ([filecoin-project/lotus#7201](https://github.com/filecoin-project/lotus/pull/7201))
|
||||
- Insert miner and network power data as gibibytes to avoid int64 overflows ([filecoin-project/lotus#7194](https://github.com/filecoin-project/lotus/pull/7194))
|
||||
- sealing: Check piece CIDs after AddPiece ([filecoin-project/lotus#7185](https://github.com/filecoin-project/lotus/pull/7185))
|
||||
- markets: OnDealExpiredOrSlashed - get deal by proposal instead of deal ID ([filecoin-project/lotus#5431](https://github.com/filecoin-project/lotus/pull/5431))
|
||||
- Incoming: improve a log message ([filecoin-project/lotus#7181](https://github.com/filecoin-project/lotus/pull/7181))
|
||||
- journal: make current log file have a fixed name (#7112) ([filecoin-project/lotus#7112](https://github.com/filecoin-project/lotus/pull/7112))
|
||||
- call string.Repeat always with positive int ([filecoin-project/lotus#7104](https://github.com/filecoin-project/lotus/pull/7104))
|
||||
- itests: support larger sector sizes; add large deal test. ([filecoin-project/lotus#7148](https://github.com/filecoin-project/lotus/pull/7148))
|
||||
- Ignore nil throttler ([filecoin-project/lotus#7169](https://github.com/filecoin-project/lotus/pull/7169))
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
- fix: escape periods to match actual periods in version
|
||||
- fix bug for CommittedCapacitySectorLifetime ([filecoin-project/lotus#7337](https://github.com/filecoin-project/lotus/pull/7337))
|
||||
- fix a panic in HandleRecoverDealIDs ([filecoin-project/lotus#7336](https://github.com/filecoin-project/lotus/pull/7336))
|
||||
- fix index out of range ([filecoin-project/lotus#7273](https://github.com/filecoin-project/lotus/pull/7273))
|
||||
- fix: correctly handle null blocks when detecting an expensive fork ([filecoin-project/lotus#7210](https://github.com/filecoin-project/lotus/pull/7210))
|
||||
- fix: make lotus soup use the correct dependencies ([filecoin-project/lotus#7221](https://github.com/filecoin-project/lotus/pull/7221))
|
||||
- fix: init restore adds empty storage.json ([filecoin-project/lotus#7025](https://github.com/filecoin-project/lotus/pull/7025))
|
||||
- fix: disable broken testground integration test ([filecoin-project/lotus#7187](https://github.com/filecoin-project/lotus/pull/7187))
|
||||
- fix TestDealPublisher ([filecoin-project/lotus#7173](https://github.com/filecoin-project/lotus/pull/7173))
|
||||
- fix: make TestTimedCacheBlockstoreSimple pass reliably ([filecoin-project/lotus#7174](https://github.com/filecoin-project/lotus/pull/7174))
|
||||
- Fix throttling bug ([filecoin-project/lotus#7177](https://github.com/filecoin-project/lotus/pull/7177))
|
||||
- sealing: Fix sector state accounting with FinalizeEarly ([filecoin-project/lotus#7256](https://github.com/filecoin-project/lotus/pull/7256))
|
||||
- docker entrypoint.sh missing variable escape character ([filecoin-project/lotus#7291](https://github.com/filecoin-project/lotus/pull/7291))
|
||||
- sealing: Fix retry loop in SubmitCommitAggregate ([filecoin-project/lotus#7245](https://github.com/filecoin-project/lotus/pull/7245))
|
||||
- sectors expired: Handle precomitted and unproven sectors correctly ([filecoin-project/lotus#7236](https://github.com/filecoin-project/lotus/pull/7236))
|
||||
- stores: Fix reserved disk usage log spam ([filecoin-project/lotus#7233](https://github.com/filecoin-project/lotus/pull/7233))
|
||||
|
||||
|
||||
## Dependency Updates
|
||||
|
||||
- github.com/filecoin-project/go-fil-markets (v1.8.1 -> v1.12.0):
|
||||
- github.com/filecoin-project/go-data-transfer (v1.7.8 -> v1.10.1):
|
||||
- update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
|
||||
- fix(deps): use go-graphsync v0.9.3 with hotfix
|
||||
- Update to unified go-graphsync v0.9.0 ([filecoin-project/lotus#7197](https://github.com/filecoin-project/lotus/pull/7197))
|
||||
|
||||
## Others
|
||||
|
||||
- v1.11.3-rc2 ([filecoin-project/lotus#7371](https://github.com/filecoin-project/lotus/pull/7371))
|
||||
- v1.11.3-rc1 ([filecoin-project/lotus#7299](https://github.com/filecoin-project/lotus/pull/7299))
|
||||
- Increase threshold from 0.5% to 1% ([filecoin-project/lotus#7262](https://github.com/filecoin-project/lotus/pull/7262))
|
||||
- ci: exclude cruft from code coverage ([filecoin-project/lotus#7189](https://github.com/filecoin-project/lotus/pull/7189))
|
||||
- Bump version to v1.11.3-dev ([filecoin-project/lotus#7180](https://github.com/filecoin-project/lotus/pull/7180))
|
||||
- test: disable flaky TestBatchDealInput ([filecoin-project/lotus#7176](https://github.com/filecoin-project/lotus/pull/7176))
|
||||
- Turn off patch ([filecoin-project/lotus#7172](https://github.com/filecoin-project/lotus/pull/7172))
|
||||
- test: disable flaky TestSimultaneousTransferLimit ([filecoin-project/lotus#7153](https://github.com/filecoin-project/lotus/pull/7153))
|
||||
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @magik6k | 39 | +3311/-1825 | 179 |
|
||||
| @Stebalien | 23 | +1935/-1417 | 84 |
|
||||
| @dirkmc | 12 | +921/-732 | 111 |
|
||||
| @dirkmc | 12 | +663/-790 | 30 |
|
||||
| @hannahhoward | 3 | +482/-275 | 46 |
|
||||
| @travisperson | 1 | +317/-65 | 5 |
|
||||
| @jennijuju | 11 | +223/-126 | 24 |
|
||||
| @hannahhoward | 7 | +257/-55 | 16 |
|
||||
| @nonsense | 9 | +258/-37 | 19 |
|
||||
| @raulk | 4 | +127/-36 | 13 |
|
||||
| @raulk | 1 | +43/-60 | 15 |
|
||||
| @arajasek | 4 | +74/-8 | 10 |
|
||||
| @Frank | 2 | +68/-8 | 3 |
|
||||
| @placer14 | 2 | +52/-1 | 4 |
|
||||
| @ldoublewood | 2 | +15/-13 | 3 |
|
||||
| @lanzafame | 1 | +16/-2 | 1 |
|
||||
| @aarshkshah1992 | 2 | +11/-6 | 2 |
|
||||
| @ZenGround0 | 2 | +7/-6 | 2 |
|
||||
| @ognots | 1 | +0/-10 | 2 |
|
||||
| @KAYUII | 2 | +4/-4 | 2 |
|
||||
| @lanzafame | 1 | +6/-0 | 1 |
|
||||
| @jacobheun | 1 | +3/-3 | 1 |
|
||||
| @frank | 1 | +4/-0 | 1 |
|
||||
|
||||
|
||||
# v1.11.2 / 2021-09-06
|
||||
|
||||
lotus v1.11.2 is a feature release that's **highly recommended for ALL lotus users to upgrade**, including node operators,
|
||||
storage providers and clients.
|
||||
|
||||
## Highlights
|
||||
- 🌟🌟🌟 Introduce Dagstore and CARv2 for deal-making (#6671) ([filecoin-project/lotus#6671](https://github.com/filecoin-project/lotus/pull/6671))
|
||||
- **[lotus miner markets' Dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview)** is a
|
||||
component of the `markets` subsystem in lotus-miner. It is a sharded store to hold large IPLD graphs efficiently,
|
||||
packaged as location-transparent attachable CAR files and it replaces the former Badger staging blockstore. It
|
||||
is designed to provide high efficiency and throughput, and minimize resource utilization during deal-making operations.
|
||||
The dagstore also leverages the indexing features of [CARv2](https://github.com/ipld/ipld/blob/master/specs/transport/car/carv2/index.md) to enable plain CAR files to act as read and write
|
||||
blockstores, which are served as the direct medium for data exchanges in markets for both storage and retrieval
|
||||
deal making without requiring intermediate buffers.
|
||||
- In the future, lotus will leverage and interact with Dagstore a lot for new features and improvements for deal
|
||||
making; therefore, it's highly recommended that lotus users go through [Lotus Miner: About the markets dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview) thoroughly to learn more about Dagstore's
|
||||
conceptual overview, terminology, directory structure, configuration and so on.
|
||||
- **Note**:
|
||||
- When you first start your lotus-miner or market subsystem with this release, a one-time/first-time **dagstore migration** will be triggered which replaces the former Badger staging blockstore with dagstore. We highly
|
||||
recommend storage providers to read this [section](https://docs.filecoin.io/mine/lotus/dagstore/#first-time-migration) to learn more about
|
||||
what the process does, what to expect and how to monitor it.
|
||||
- It is highly recommended to **wait for all ongoing data transfers to finish or cancel inbound storage deals that
are still transferring**, using the `lotus-miner data-transfers cancel` command before upgrading your market nodes, because the new dagstore changes attributes in the internal deal state objects, and the paths to the staging CARs where the deal data was being placed will be lost.
|
||||
- ‼️ Having your dags initialized will become important in the near future for you to provide a better storage
and retrieval service. We suggest you start forced bulk initialization soon if possible, as this process
places a relatively high I/O workload on your storage system and is better carried out gradually and over a
longer timeframe. Read how to properly perform a forced bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization).
|
||||
- ⏮ Rollback Alert (from v1.11.2-rcX to any lower version): If a storage deal is initiated with the M1/v1.11.2(-rcX)
release, it needs to get to the `StorageDealAwaitingPrecommit` state before you can do a version rollback, or the markets process may panic.
|
||||
- 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedbacks](https://github.com/filecoin-project/lotus/discussions/6852) for Dagstore in the past month!**
|
||||
- 🌟🌟 rpcenc: Support reader redirect ([filecoin-project/lotus#6952](https://github.com/filecoin-project/lotus/pull/6952))
|
||||
- This allows market processes to send piece bytes directly to workers involved in `AddPiece`.
|
||||
- Extending sectors: more practical and flexible tools ([filecoin-project/lotus#6097](https://github.com/filecoin-project/lotus/pull/6097))
|
||||
- `lotus-miner sectors check-expire` to inspect expiring sectors.
|
||||
- `lotus-miner sectors renew` for renewing expiring sectors, see the command help menu for customizable option
|
||||
like `extension`, `new-expiration` and so on.
|
||||
- ‼️ MpoolReplaceCmd (`lotus mpool replace`) now takes FIL for fee-limit ([filecoin-project/lotus#6927](https://github.com/filecoin-project/lotus/pull/6927))
|
||||
- Drop townhall/chainwatch ([filecoin-project/lotus#6912](https://github.com/filecoin-project/lotus/pull/6912))
|
||||
- ChainWatch is no longer supported by lotus.
|
||||
- Configurable CC Sector Expiration ([filecoin-project/lotus#6803](https://github.com/filecoin-project/lotus/pull/6803))
|
||||
- Set `CommittedCapacitySectorLifetime` in the lotus-miner `config.toml` to specify the default expiration for a new CC
sector; the value must be between 180 and 540 days inclusive (a hedged sketch follows this list).
|
||||
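
A sketch of the CC sector lifetime setting described above. The `[Sealing]` placement and the Go-style duration encoding are assumptions; the 540-day value (12960 hours) is just one legal choice within the 180–540 day range stated in the note:

```toml
# Hypothetical lotus-miner config.toml excerpt
[Sealing]
  # default expiration for a new CC sector, expressed as a duration (540 days here)
  CommittedCapacitySectorLifetime = "12960h0m0s"
```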
|
||||
## New Features
|
||||
- api/command for encoding actor params ([filecoin-project/lotus#7150](https://github.com/filecoin-project/lotus/pull/7150))
|
||||
- shed: Support raw encoding in cid id ([filecoin-project/lotus#7149](https://github.com/filecoin-project/lotus/pull/7149))
|
||||
- feat(miner deals): create subdir to miner repo for staged deals ([filecoin-project/lotus#6853](https://github.com/filecoin-project/lotus/pull/6853))
|
||||
- Support --actor in miner actor control list ([filecoin-project/lotus#7027](https://github.com/filecoin-project/lotus/pull/7027))
|
||||
- Shed: Include network name in genesis-verify ([filecoin-project/lotus#7019](https://github.com/filecoin-project/lotus/pull/7019))
|
||||
- feat: add ChainGetTipSetAfterHeight ([filecoin-project/lotus#6990](https://github.com/filecoin-project/lotus/pull/6990))
|
||||
- lotus-shed splitstore clear command ([filecoin-project/lotus#6967](https://github.com/filecoin-project/lotus/pull/6967))
|
||||
|
||||
## Improvements
|
||||
- improve get api error messages ([filecoin-project/lotus#7088](https://github.com/filecoin-project/lotus/pull/7088))
|
||||
- Strict major minor version checking on v0 and v1 apis ([filecoin-project/lotus#7038](https://github.com/filecoin-project/lotus/pull/7038))
|
||||
- make lotus-miner net commands hit markets subsystem. ([filecoin-project/lotus#7042](https://github.com/filecoin-project/lotus/pull/7042))
|
||||
- Test with latest actors version ([filecoin-project/lotus#6998](https://github.com/filecoin-project/lotus/pull/6998))
|
||||
- Reduce splitstore memory usage during chain walks ([filecoin-project/lotus#6949](https://github.com/filecoin-project/lotus/pull/6949))
|
||||
- Remove forgotten non-functioning config from the pre-mainnet days ([filecoin-project/lotus#6970](https://github.com/filecoin-project/lotus/pull/6970))
|
||||
- add explicit error msg if repo dir does not exist ([filecoin-project/lotus#6909](https://github.com/filecoin-project/lotus/pull/6909))
|
||||
- Test/pledge batching msg prop ([filecoin-project/lotus#6537](https://github.com/filecoin-project/lotus/pull/6537))
|
||||
- reasonable max value for initial sector expiration ([filecoin-project/lotus#6099](https://github.com/filecoin-project/lotus/pull/6099))
|
||||
- support MARKETS_API_INFO env var, and markets-repo, markets-api-url CLI flags. ([filecoin-project/lotus#6936](https://github.com/filecoin-project/lotus/pull/6936))
|
||||
- Improve formatting of workers CLI ([filecoin-project/lotus#6942](https://github.com/filecoin-project/lotus/pull/6942))
|
||||
- make: set default GOCC earlier ([filecoin-project/lotus#6932](https://github.com/filecoin-project/lotus/pull/6932))
|
||||
- Moving GC Followup ([filecoin-project/lotus#6905](https://github.com/filecoin-project/lotus/pull/6905))
|
||||
- Log more call context during errors ([filecoin-project/lotus#6918](https://github.com/filecoin-project/lotus/pull/6918))
|
||||
- polish(errors): better state tree errors ([filecoin-project/lotus#6923](https://github.com/filecoin-project/lotus/pull/6923))
|
||||
- adding an RuntimeSubsystems API to storage miner; fix `lotus-miner info` ([filecoin-project/lotus#6906](https://github.com/filecoin-project/lotus/pull/6906))
|
||||
- Reduce entropy in the chain package ([filecoin-project/lotus#6889](https://github.com/filecoin-project/lotus/pull/6889))
|
||||
- make: Allow setting Go compiler with GOCC ([filecoin-project/lotus#6911](https://github.com/filecoin-project/lotus/pull/6911))
|
||||
|
||||
## Bug Fixes
|
||||
- sealing: Fix RecoverDealIDs loop with changed PieceCID ([filecoin-project/lotus#7117](https://github.com/filecoin-project/lotus/pull/7117))
|
||||
- Fix error handling in SectorAddPieceToAny api impl ([filecoin-project/lotus#7135](https://github.com/filecoin-project/lotus/pull/7135))
|
||||
- add rice box to required binaries ([filecoin-project/lotus#7125](https://github.com/filecoin-project/lotus/pull/7125))
|
||||
- fix(miner): always create miner deal staging directory (#7098) ([filecoin-project/lotus#7098](https://github.com/filecoin-project/lotus/pull/7098))
|
||||
- fix build after merging #6097. (#7096) ([filecoin-project/lotus#7096](https://github.com/filecoin-project/lotus/pull/7096))
|
||||
- fix: don't check for t_aux when proving ([filecoin-project/lotus#7011](https://github.com/filecoin-project/lotus/pull/7011))
|
||||
- fix: vet actors shims ([filecoin-project/lotus#6999](https://github.com/filecoin-project/lotus/pull/6999))
|
||||
- fix: more logging in maybeStartBatch error ([filecoin-project/lotus#6996](https://github.com/filecoin-project/lotus/pull/6996))
|
||||
- fix flaky TestDealPublisher and re-enable ([filecoin-project/lotus#6991](https://github.com/filecoin-project/lotus/pull/6991))
|
||||
- fix skipCount ([filecoin-project/lotus#6940](https://github.com/filecoin-project/lotus/pull/6940))
|
||||
- fix bug in MpoolPending message exclusion ([filecoin-project/lotus#6945](https://github.com/filecoin-project/lotus/pull/6945))
|
||||
- PreCommitPolicy: Don't try to align expirations on proving period boundaries ([filecoin-project/lotus#7018](https://github.com/filecoin-project/lotus/pull/7018))
|
||||
- make: fix version check when using gotip ([filecoin-project/lotus#6916](https://github.com/filecoin-project/lotus/pull/6916))
|
||||
- fix ticket check ([filecoin-project/lotus#6882](https://github.com/filecoin-project/lotus/pull/6882))
|
||||
|
||||
## Dependency Updates
|
||||
- github.com/filecoin-project/go-data-transfer (v1.7.2 -> v1.7.8):
|
||||
- github.com/filecoin-project/go-fil-markets (v1.6.2 -> v1.8.1):
|
||||
- update go-libp2p-pubsub to v0.5.4 ([filecoin-project/lotus#6958](https://github.com/filecoin-project/lotus/pull/6958))
|
||||
- integrate the proof patch: tag proofs-v9-revert-deps-hotfix
|
||||
- Update markets, dt and graphsync ([filecoin-project/lotus#7160](https://github.com/filecoin-project/lotus/pull/7160))
|
||||
- Remove replace directive for multihash dep (#7113) ([filecoin-project/lotus#7113](https://github.com/filecoin-project/lotus/pull/7113))
|
||||
- upgrade upstream dependencies. ([filecoin-project/lotus#7115](https://github.com/filecoin-project/lotus/pull/7115))
|
||||
- Update to latest FFI ([filecoin-project/lotus#7110](https://github.com/filecoin-project/lotus/pull/7110))
|
||||
- Update state machine deps for logging ([filecoin-project/lotus#6941](https://github.com/filecoin-project/lotus/pull/6941))
|
||||
- Update deps for more logging in data transfer and markets ([filecoin-project/lotus#6937](https://github.com/filecoin-project/lotus/pull/6937))
|
||||
- Update to branches with improved logging ([filecoin-project/lotus#6919](https://github.com/filecoin-project/lotus/pull/6919))
|
||||
- update go-libp2p-pubsub to v0.5.3 ([filecoin-project/lotus#6907](https://github.com/filecoin-project/lotus/pull/6907))
|
||||
|
||||
## Others
|
||||
- Fix nits and see if codecov works now with auto ([filecoin-project/lotus#7151](https://github.com/filecoin-project/lotus/pull/7151))
|
||||
- Codecov Projects ([filecoin-project/lotus#7147](https://github.com/filecoin-project/lotus/pull/7147))
|
||||
- remove m1 templates and make area selection multi-optionable ([filecoin-project/lotus#7121](https://github.com/filecoin-project/lotus/pull/7121))
|
||||
- release -> master ([filecoin-project/lotus#7105](https://github.com/filecoin-project/lotus/pull/7105))
|
||||
- Lotus release process - how we make releases ([filecoin-project/lotus#6944](https://github.com/filecoin-project/lotus/pull/6944))
|
||||
- codecov: fix mock name ([filecoin-project/lotus#7039](https://github.com/filecoin-project/lotus/pull/7039))
|
||||
- codecov: fix regexes ([filecoin-project/lotus#7037](https://github.com/filecoin-project/lotus/pull/7037))
|
||||
- chore: disable flaky test ([filecoin-project/lotus#6957](https://github.com/filecoin-project/lotus/pull/6957))
|
||||
- set buildtype in nerpa and butterfly ([filecoin-project/lotus#6085](https://github.com/filecoin-project/lotus/pull/6085))
|
||||
- release v1.11.1 backport -> master ([filecoin-project/lotus#6929](https://github.com/filecoin-project/lotus/pull/6929))
|
||||
- chore: fixup issue templates ([filecoin-project/lotus#6899](https://github.com/filecoin-project/lotus/pull/6899))
|
||||
- bump master version to v1.11.2-dev ([filecoin-project/lotus#6903](https://github.com/filecoin-project/lotus/pull/6903))
|
||||
- releases -> master for v1.11.0 ([filecoin-project/lotus#6894](https://github.com/filecoin-project/lotus/pull/6894))
|
||||
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @magik6k | 23 | +5040/-8389 | 114 |
|
||||
| @aarshkshah1992 | 11 | +4859/-1078 | 101 |
|
||||
| @raulk | 5 | +4170/-1662 | 104 |
|
||||
| @vyzo | 30 | +1092/-702 | 49 |
|
||||
| @nonsense | 6 | +630/-472 | 19 |
|
||||
| @ZenGround0 | 31 | +556/-274 | 74 |
|
||||
| @He Weidong | 16 | +680/-128 | 16 |
|
||||
| @raulk | 16 | +444/-277 | 49 |
|
||||
| @Stebalien | 11 | +403/-259 | 48 |
|
||||
| @jennijuju | 17 | +276/-281 | 42 |
|
||||
| @dirkmc | 5 | +204/-138 | 20 |
|
||||
| @placer14 | 7 | +178/-77 | 17 |
|
||||
| @BlocksOnAChain | 1 | +138/-0 | 1 |
|
||||
| @Frrist | 1 | +63/-56 | 2 |
|
||||
| @arajasek | 7 | +74/-42 | 13 |
|
||||
| @frrist | 2 | +67/-6 | 6 |
|
||||
| @hannahhoward | 2 | +13/-11 | 3 |
|
||||
| @coryschwartz | 1 | +16/-6 | 3 |
|
||||
| @whyrusleeping | 1 | +7/-7 | 1 |
|
||||
| @hunjixin | 1 | +8/-6 | 1 |
|
||||
| @aarshkshah1992 | 1 | +6/-6 | 2 |
|
||||
| @dirkmc | 2 | +8/-0 | 2 |
|
||||
| @mx | 2 | +6/-1 | 2 |
|
||||
| @travisperson | 1 | +3/-2 | 1 |
|
||||
| @jennijuju | 2 | +2/-2 | 2 |
|
||||
| @ribasushi | 1 | +1/-2 | 2 |
|
||||
|
||||
# v1.11.1 / 2021-08-16
|
||||
|
||||
> Note: for discussion about this release, please comment [here](https://github.com/filecoin-project/lotus/discussions/6904)
|
||||
|
8  Makefile
@ -76,9 +76,6 @@ debug: build-devnets
|
||||
calibnet: GOFLAGS+=-tags=calibnet
|
||||
calibnet: build-devnets
|
||||
|
||||
nerpanet: GOFLAGS+=-tags=nerpanet
|
||||
nerpanet: build-devnets
|
||||
|
||||
butterflynet: GOFLAGS+=-tags=butterflynet
|
||||
butterflynet: build-devnets
|
||||
|
||||
@ -294,6 +291,7 @@ method-gen: api-gen
|
||||
(cd ./lotuspond/front/src/chain && $(GOCC) run ./methodgen.go)
|
||||
|
||||
actors-gen:
|
||||
$(GOCC) run ./gen/inline-gen . gen/inlinegen-data.json
|
||||
$(GOCC) run ./chain/actors/agen
|
||||
$(GOCC) fmt ./...
|
||||
|
||||
@ -344,7 +342,7 @@ docsgen-openrpc-worker: docsgen-openrpc-bin
|
||||
.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
|
||||
|
||||
gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci
|
||||
@echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli"
|
||||
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
|
||||
.PHONY: gen
|
||||
|
||||
snap: lotus lotus-miner lotus-worker
|
||||
@ -354,6 +352,8 @@ snap: lotus lotus-miner lotus-worker
|
||||
# separate from gen because it needs binaries
|
||||
docsgen-cli: lotus lotus-miner lotus-worker
|
||||
python ./scripts/generate-lotus-cli.py
|
||||
./lotus config default > documentation/en/default-lotus-config.toml
|
||||
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
|
||||
.PHONY: docsgen-cli
|
||||
|
||||
print-%:
|
||||
|
@ -123,7 +123,6 @@ Note: The default branch `master` is the dev branch where the latest new feature
|
||||
|
||||
# Or to join a testnet or devnet:
|
||||
make clean calibnet # Calibration with min 32GiB sectors
|
||||
make clean nerpanet # Nerpa with min 512MiB sectors
|
||||
|
||||
sudo make install
|
||||
```
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
"github.com/filecoin-project/lotus/journal/alerting"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
@ -33,6 +34,10 @@ type Common interface {
|
||||
LogList(context.Context) ([]string, error) //perm:write
|
||||
LogSetLevel(context.Context, string, string) error //perm:write
|
||||
|
||||
// LogAlerts returns list of all, active and inactive alerts tracked by the
|
||||
// node
|
||||
LogAlerts(ctx context.Context) ([]alerting.Alert, error) //perm:admin
|
||||
|
||||
// MethodGroup: Common
|
||||
|
||||
// Version provides information about API provider
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
textselector "github.com/ipld/go-ipld-selector-text-lite"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -72,12 +73,6 @@ type FullNode interface {
|
||||
// ChainHead returns the current head of the chain.
|
||||
ChainHead(context.Context) (*types.TipSet, error) //perm:read
|
||||
|
||||
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
|
||||
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
|
||||
|
||||
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
|
||||
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
|
||||
|
||||
// ChainGetBlock returns the block specified by the given CID.
|
||||
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
|
||||
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
|
||||
@ -296,7 +291,7 @@ type FullNode interface {
|
||||
|
||||
// // UX ?
|
||||
|
||||
// MethodGroup: Wallet
|
||||
// MethodGroup: WalletF
|
||||
|
||||
// WalletNew creates a new address in the wallet with the given sigType.
|
||||
// Available key types: bls, secp256k1, secp256k1-ledger
|
||||
@ -591,6 +586,11 @@ type FullNode interface {
|
||||
// StateNetworkVersion returns the network version at the given tipset
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
|
||||
|
||||
// StateGetRandomnessFromTickets is used to sample the chain for randomness.
|
||||
StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
|
||||
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
|
||||
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
|
||||
|
||||
// MethodGroup: Msig
|
||||
// The Msig methods are used to interact with multisig wallets on the
|
||||
// filecoin network
|
||||
@ -931,9 +931,10 @@ type MarketDeal struct {
|
||||
|
||||
type RetrievalOrder struct {
|
||||
// TODO: make this less unixfs specific
|
||||
Root cid.Cid
|
||||
Piece *cid.Cid
|
||||
Size uint64
|
||||
Root cid.Cid
|
||||
Piece *cid.Cid
|
||||
DatamodelPathSelector *textselector.Expression
|
||||
Size uint64
|
||||
|
||||
FromLocalCAR string // if specified, get data from a local CARv2 file.
|
||||
// TODO: support offset
|
||||
|
@ -33,6 +33,7 @@ type Gateway interface {
|
||||
ChainHead(ctx context.Context) (*types.TipSet, error)
|
||||
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
|
||||
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
|
||||
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error)
|
||||
ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
|
||||
ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
|
||||
ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
|
||||
|
@ -166,6 +166,7 @@ type StorageMiner interface {
|
||||
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
|
||||
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
|
||||
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
|
||||
MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin
|
||||
|
||||
// DagstoreListShards returns information about all shards known to the
|
||||
// DAG store. Only available on nodes running the markets subsystem.
|
||||
@ -267,6 +268,11 @@ type SectorLog struct {
|
||||
Message string
|
||||
}
|
||||
|
||||
type SectorPiece struct {
|
||||
Piece abi.PieceInfo
|
||||
DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
|
||||
}
|
||||
|
||||
type SectorInfo struct {
|
||||
SectorID abi.SectorNumber
|
||||
State SectorState
|
||||
@ -274,6 +280,7 @@ type SectorInfo struct {
|
||||
CommR *cid.Cid
|
||||
Proof []byte
|
||||
Deals []abi.DealID
|
||||
Pieces []SectorPiece
|
||||
Ticket SealTicket
|
||||
Seed SealSeed
|
||||
PreCommitMsg *cid.Cid
|
||||
|
@ -13,10 +13,8 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-filestore"
|
||||
"github.com/libp2p/go-libp2p-core/metrics"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@ -25,9 +23,10 @@ import (
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
|
||||
filestore "github.com/filecoin-project/go-fil-markets/filestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
textselector "github.com/ipld/go-ipld-selector-text-lite"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
@ -90,8 +89,8 @@ func init() {
|
||||
addExample(pid)
|
||||
addExample(&pid)
|
||||
|
||||
multistoreIDExample := multistore.StoreID(50)
|
||||
storeIDExample := imports.ID(50)
|
||||
textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
|
||||
|
||||
addExample(bitfield.NewFromSet([]uint64{5}))
|
||||
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
|
||||
@ -110,7 +109,6 @@ func init() {
|
||||
addExample(abi.UnpaddedPieceSize(1024))
|
||||
addExample(abi.UnpaddedPieceSize(1024).Padded())
|
||||
addExample(abi.DealID(5432))
|
||||
addExample(filestore.StatusFileChanged)
|
||||
addExample(abi.SectorNumber(9))
|
||||
addExample(abi.SectorSize(32 * 1024 * 1024 * 1024))
|
||||
addExample(api.MpoolChange(0))
|
||||
@ -124,10 +122,9 @@ func init() {
|
||||
addExample(datatransfer.Ongoing)
|
||||
addExample(storeIDExample)
|
||||
addExample(&storeIDExample)
|
||||
addExample(multistoreIDExample)
|
||||
addExample(&multistoreIDExample)
|
||||
addExample(retrievalmarket.ClientEventDealAccepted)
|
||||
addExample(retrievalmarket.DealStatusNew)
|
||||
addExample(&textSelExample)
|
||||
addExample(network.ReachabilityPublic)
|
||||
addExample(build.NewestNetworkVersion)
|
||||
addExample(map[string]int{"name": 42})
|
||||
@ -179,7 +176,7 @@ func init() {
|
||||
ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
|
||||
|
||||
// miner specific
|
||||
addExample(filestore2.Path(".lotusminer/fstmp123"))
|
||||
addExample(filestore.Path(".lotusminer/fstmp123"))
|
||||
si := uint64(12)
|
||||
addExample(&si)
|
||||
addExample(retrievalmarket.DealID(5))
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
alerting "github.com/filecoin-project/lotus/journal/alerting"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
imports "github.com/filecoin-project/lotus/node/repo/imports"
|
||||
@ -299,36 +300,6 @@ func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// ChainGetRandomnessFromBeacon mocks base method.
|
||||
func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon.
|
||||
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// ChainGetRandomnessFromTickets mocks base method.
|
||||
func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets.
|
||||
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// ChainGetTipSet mocks base method.
|
||||
func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -995,6 +966,21 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
|
||||
}
|
||||
|
||||
// LogAlerts mocks base method.
|
||||
func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "LogAlerts", arg0)
|
||||
ret0, _ := ret[0].([]alerting.Alert)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// LogAlerts indicates an expected call of LogAlerts.
|
||||
func (mr *MockFullNodeMockRecorder) LogAlerts(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogAlerts", reflect.TypeOf((*MockFullNode)(nil).LogAlerts), arg0)
|
||||
}
|
||||
|
||||
// LogList mocks base method.
|
||||
func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2275,6 +2261,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromBeacon mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromBeacon indicates an expected call of StateGetRandomnessFromBeacon.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromTickets mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessFromTickets(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromTickets indicates an expected call of StateGetRandomnessFromTickets.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// StateListActors mocks base method.
|
||||
func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
"github.com/filecoin-project/lotus/journal/alerting"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
@ -63,6 +64,8 @@ type CommonStruct struct {
|
||||
|
||||
Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"`
|
||||
|
||||
LogAlerts func(p0 context.Context) ([]alerting.Alert, error) `perm:"admin"`
|
||||
|
||||
LogList func(p0 context.Context) ([]string, error) `perm:"write"`
|
||||
|
||||
LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"`
|
||||
@ -127,10 +130,6 @@ type FullNodeStruct struct {
|
||||
|
||||
ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `perm:"read"`
|
||||
|
||||
ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
|
||||
ChainGetTipSetAfterHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
@ -347,6 +346,10 @@ type FullNodeStruct struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
|
||||
StateListMessages func(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
|
||||
@ -477,6 +480,8 @@ type GatewayStruct struct {
|
||||
|
||||
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
|
||||
|
||||
ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) ``
|
||||
|
||||
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
|
||||
|
||||
ChainGetTipSetAfterHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) ``
|
||||
@ -678,6 +683,8 @@ type StorageMinerStruct struct {
|
||||
|
||||
MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
|
||||
MarketRetryPublishDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
|
||||
|
||||
MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"`
|
||||
|
||||
MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"`
|
||||
@ -946,6 +953,17 @@ func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, err
|
||||
return *new(apitypes.OpenRPCDocument), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) LogAlerts(p0 context.Context) ([]alerting.Alert, error) {
|
||||
if s.Internal.LogAlerts == nil {
|
||||
return *new([]alerting.Alert), ErrNotSupported
|
||||
}
|
||||
return s.Internal.LogAlerts(p0)
|
||||
}
|
||||
|
||||
func (s *CommonStub) LogAlerts(p0 context.Context) ([]alerting.Alert, error) {
|
||||
return *new([]alerting.Alert), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) {
|
||||
if s.Internal.LogList == nil {
|
||||
return *new([]string), ErrNotSupported
|
||||
@ -1155,28 +1173,6 @@ func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 t
|
||||
return *new([]*HeadChange), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
|
||||
if s.Internal.ChainGetRandomnessFromBeacon == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
|
||||
if s.Internal.ChainGetRandomnessFromTickets == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
|
||||
if s.Internal.ChainGetTipSet == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -2365,6 +2361,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessFromBeacon == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessFromBeacon(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessFromTickets(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessFromTickets == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessFromTickets(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessFromTickets(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
|
||||
if s.Internal.StateListActors == nil {
|
||||
return *new([]address.Address), ErrNotSupported
|
||||
@ -3025,6 +3043,17 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
|
||||
if s.Internal.ChainGetPath == nil {
|
||||
return *new([]*HeadChange), ErrNotSupported
|
||||
}
|
||||
return s.Internal.ChainGetPath(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
|
||||
return *new([]*HeadChange), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
|
||||
if s.Internal.ChainGetTipSet == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -3993,6 +4022,17 @@ func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 data
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error {
|
||||
if s.Internal.MarketRetryPublishDeal == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.MarketRetryPublishDeal(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {
|
||||
if s.Internal.MarketSetAsk == nil {
|
||||
return ErrNotSupported
|
||||
|
@ -598,6 +598,11 @@ type FullNode interface {
|
||||
// StateNetworkVersion returns the network version at the given tipset
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
|
||||
|
||||
// StateGetRandomnessFromTickets is used to sample the chain for randomness.
|
||||
StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
|
||||
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
|
||||
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
|
||||
|
||||
// MethodGroup: Msig
|
||||
// The Msig methods are used to interact with multisig wallets on the
|
||||
// filecoin network
|
||||
|
@ -267,6 +267,10 @@ type FullNodeStruct struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
|
||||
|
||||
StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
@ -1742,6 +1746,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessFromBeacon == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessFromBeacon(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessFromTickets(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessFromTickets == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessFromTickets(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessFromTickets(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
|
||||
if s.Internal.StateGetReceipt == nil {
|
||||
return nil, ErrNotSupported
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
alerting "github.com/filecoin-project/lotus/journal/alerting"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
imports "github.com/filecoin-project/lotus/node/repo/imports"
|
||||
@ -950,6 +951,21 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
|
||||
}
|
||||
|
||||
// LogAlerts mocks base method.
|
||||
func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "LogAlerts", arg0)
|
||||
ret0, _ := ret[0].([]alerting.Alert)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// LogAlerts indicates an expected call of LogAlerts.
|
||||
func (mr *MockFullNodeMockRecorder) LogAlerts(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogAlerts", reflect.TypeOf((*MockFullNode)(nil).LogAlerts), arg0)
|
||||
}
|
||||
|
||||
// LogList mocks base method.
|
||||
func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2155,6 +2171,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromBeacon mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromBeacon indicates an expected call of StateGetRandomnessFromBeacon.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromTickets mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessFromTickets(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromTickets indicates an expected call of StateGetRandomnessFromTickets.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// StateGetReceipt mocks base method.
|
||||
func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -3,6 +3,8 @@ package v0api
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"golang.org/x/xerrors"
|
||||
@ -184,4 +186,12 @@ func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Addre
|
||||
return w.executePrototype(ctx, p)
|
||||
}
|
||||
|
||||
func (w *WrapperV1Full) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
|
||||
return w.StateGetRandomnessFromTickets(ctx, personalization, randEpoch, entropy, tsk)
|
||||
}
|
||||
|
||||
func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
|
||||
return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
|
||||
}
|
||||
|
||||
var _ FullNode = &WrapperV1Full{}
|
||||
|
@ -54,7 +54,7 @@ func VersionForType(nodeType NodeType) (Version, error) {
|
||||
|
||||
// semver versions of the rpc api exposed
|
||||
var (
|
||||
FullAPIVersion0 = newVer(1, 3, 0)
|
||||
FullAPIVersion0 = newVer(1, 4, 0)
|
||||
FullAPIVersion1 = newVer(2, 1, 0)
|
||||
|
||||
MinerAPIVersion0 = newVer(1, 2, 0)
|
||||
|
@ -47,8 +47,12 @@ func (t *TimedCacheBlockstore) Start(_ context.Context) error {
|
||||
return fmt.Errorf("already started")
|
||||
}
|
||||
t.closeCh = make(chan struct{})
|
||||
|
||||
// Create this timer before starting the goroutine. Otherwise, creating the timer will race
|
||||
// with adding time to the mock clock, and we could add time _first_, then stall waiting for
|
||||
// a timer that'll never fire.
|
||||
ticker := t.clock.Ticker(t.interval)
|
||||
go func() {
|
||||
ticker := t.clock.Ticker(t.interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
|
@ -1,2 +1,2 @@
|
||||
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g
|
||||
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji
|
||||
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBzv5sf4eTyo8cjJGfGnpxo6QkEPkRShG9GqjE2A5QaW5
|
||||
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBo9TSD4XXRFtu6snv6QNYvXgRaSaVb116YiYEsDWgKtq
|
||||
|
@ -1,4 +0,0 @@
|
||||
/dns4/bootstrap-2.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWQcL6ReWmR6ASWx4iT7EiAmxKDQpvgq1MKNTQZp5NPnWW
|
||||
/dns4/bootstrap-0.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWGyJCwCm7EfupM15CFPXM4c7zRVHwwwjcuy9umaGeztMX
|
||||
/dns4/bootstrap-3.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWNK9RmfksKXSCQj7ZwAM7L6roqbN4kwJteihq7yPvSgPs
|
||||
/dns4/bootstrap-1.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWCWSaH6iUyXYspYxELjDfzToBsyVGVz3QvC7ysXv7wESo
|
Binary file not shown.
Binary file not shown.
6
build/limits.go
Normal file
6
build/limits.go
Normal file
@ -0,0 +1,6 @@
|
||||
package build
|
||||
|
||||
var (
|
||||
DefaultFDLimit uint64 = 16 << 10
|
||||
MinerFDLimit uint64 = 100_000
|
||||
)
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
183
build/panic_reporter.go
Normal file
183
build/panic_reporter.go
Normal file
@ -0,0 +1,183 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime/debug"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/icza/backscanner"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
panicLog = logging.Logger("panic-reporter")
|
||||
defaultJournalTail = 500
|
||||
)
|
||||
|
||||
// PanicReportingPath is the name of the subdir created within the repoPath
|
||||
// path provided to GeneratePanicReport
|
||||
var PanicReportingPath = "panic-reports"
|
||||
|
||||
// PanicReportJournalTail is the number of lines captured from the end of
|
||||
// the lotus journal to be included in the panic report.
|
||||
var PanicReportJournalTail = defaultJournalTail
|
||||
|
||||
// GeneratePanicReport produces a timestamped dump of the application state
|
||||
// for inspection and debugging purposes. Call this function from any place
|
||||
// where a panic or severe error needs to be examined. `persistPath` is the
|
||||
// path where the reports should be saved. `repoPath` is the path where the
|
||||
// journal should be read from. `label` is an optional string to include
|
||||
// next to the report timestamp.
|
||||
func GeneratePanicReport(persistPath, repoPath, label string) {
|
||||
// make sure we always dump the latest logs on the way out
|
||||
// especially since we're probably panicking
|
||||
defer panicLog.Sync() //nolint:errcheck
|
||||
|
||||
if persistPath == "" && repoPath == "" {
|
||||
panicLog.Warn("missing persist and repo paths, aborting panic report creation")
|
||||
return
|
||||
}
|
||||
|
||||
reportPath := filepath.Join(repoPath, PanicReportingPath, generateReportName(label))
|
||||
if persistPath != "" {
|
||||
reportPath = filepath.Join(persistPath, generateReportName(label))
|
||||
}
|
||||
panicLog.Warnf("generating panic report at %s", reportPath)
|
||||
|
||||
tl := os.Getenv("LOTUS_PANIC_JOURNAL_LOOKBACK")
|
||||
if tl != "" && PanicReportJournalTail == defaultJournalTail {
|
||||
i, err := strconv.Atoi(tl)
|
||||
if err == nil {
|
||||
PanicReportJournalTail = i
|
||||
}
|
||||
}
|
||||
|
||||
err := os.MkdirAll(reportPath, 0755)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
writeAppVersion(filepath.Join(reportPath, "version"))
|
||||
writeStackTrace(filepath.Join(reportPath, "stacktrace.dump"))
|
||||
writeProfile("goroutines", filepath.Join(reportPath, "goroutines.pprof.gz"))
|
||||
writeProfile("heap", filepath.Join(reportPath, "heap.pprof.gz"))
|
||||
writeJournalTail(PanicReportJournalTail, repoPath, filepath.Join(reportPath, "journal.ndjson"))
|
||||
}
|
||||
|
||||
func writeAppVersion(file string) {
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
versionString := []byte(BuildVersion + BuildTypeString() + CurrentCommit + "\n")
|
||||
if _, err := f.Write(versionString); err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func writeStackTrace(file string) {
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
if _, err := f.Write(debug.Stack()); err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func writeProfile(profileType string, file string) {
|
||||
p := pprof.Lookup(profileType)
|
||||
if p == nil {
|
||||
panicLog.Warnf("%s profile not available", profileType)
|
||||
return
|
||||
}
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
return
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
if err := p.WriteTo(f, 0); err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func writeJournalTail(tailLen int, repoPath, file string) {
|
||||
if repoPath == "" {
|
||||
panicLog.Warn("repo path is empty, aborting copy of journal log")
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
return
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
jPath, err := getLatestJournalFilePath(repoPath)
|
||||
if err != nil {
|
||||
panicLog.Warnf("failed getting latest journal: %s", err.Error())
|
||||
return
|
||||
}
|
||||
j, err := os.OpenFile(jPath, os.O_RDONLY, 0400)
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
return
|
||||
}
|
||||
js, err := j.Stat()
|
||||
if err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
return
|
||||
}
|
||||
jScan := backscanner.New(j, int(js.Size()))
|
||||
linesWritten := 0
|
||||
for {
|
||||
if linesWritten > tailLen {
|
||||
break
|
||||
}
|
||||
line, _, err := jScan.LineBytes()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
panicLog.Error(err.Error())
|
||||
}
|
||||
break
|
||||
}
|
||||
if _, err := f.Write(line); err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
break
|
||||
}
|
||||
if _, err := f.Write([]byte("\n")); err != nil {
|
||||
panicLog.Error(err.Error())
|
||||
break
|
||||
}
|
||||
linesWritten++
|
||||
}
|
||||
}
|
||||
|
||||
func getLatestJournalFilePath(repoPath string) (string, error) {
|
||||
journalPath := filepath.Join(repoPath, "journal")
|
||||
entries, err := os.ReadDir(journalPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(journalPath, entries[len(entries)-1].Name()), nil
|
||||
}
|
||||
|
||||
func generateReportName(label string) string {
|
||||
label = strings.ReplaceAll(label, " ", "")
|
||||
return fmt.Sprintf("report_%s_%s", label, time.Now().Format("2006-01-02T150405"))
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
//go:build debug || 2k
|
||||
// +build debug 2k
|
||||
|
||||
package build
|
||||
@ -9,12 +10,15 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
const BootstrappersFile = ""
|
||||
const GenesisFile = ""
|
||||
|
||||
const GenesisNetworkVersion = network.Version14
|
||||
|
||||
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
|
||||
|
||||
const BreezeGasTampingDuration = 0
|
||||
@ -41,6 +45,8 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15)
|
||||
|
||||
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
|
||||
|
||||
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
@ -81,8 +87,10 @@ func init() {
|
||||
UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight)
|
||||
UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
|
||||
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
|
||||
UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
|
||||
|
||||
BuildType |= Build2k
|
||||
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(4)
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build butterflynet
|
||||
// +build butterflynet
|
||||
|
||||
package build
|
||||
@ -5,6 +6,7 @@ package build
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -14,6 +16,8 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const GenesisNetworkVersion = network.Version13
|
||||
|
||||
const BootstrappersFile = "butterflynet.pi"
|
||||
const GenesisFile = "butterflynet.car"
|
||||
|
||||
@ -23,19 +27,20 @@ const UpgradeSmokeHeight = -2
|
||||
const UpgradeIgnitionHeight = -3
|
||||
const UpgradeRefuelHeight = -4
|
||||
|
||||
var UpgradeAssemblyHeight = abi.ChainEpoch(30)
|
||||
var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
|
||||
|
||||
const UpgradeTapeHeight = 60
|
||||
const UpgradeLiftoffHeight = -5
|
||||
const UpgradeKumquatHeight = 90
|
||||
const UpgradeCalicoHeight = 120
|
||||
const UpgradePersianHeight = 150
|
||||
const UpgradeClausHeight = 180
|
||||
const UpgradeOrangeHeight = 210
|
||||
const UpgradeTrustHeight = 240
|
||||
const UpgradeNorwegianHeight = UpgradeTrustHeight + (builtin2.EpochsInHour * 12)
|
||||
const UpgradeTurboHeight = 8922
|
||||
const UpgradeHyperdriveHeight = 9999999
|
||||
const UpgradeTapeHeight = -6
|
||||
const UpgradeLiftoffHeight = -7
|
||||
const UpgradeKumquatHeight = -8
|
||||
const UpgradeCalicoHeight = -9
|
||||
const UpgradePersianHeight = -10
|
||||
const UpgradeClausHeight = -11
|
||||
const UpgradeOrangeHeight = -12
|
||||
const UpgradeTrustHeight = -13
|
||||
const UpgradeNorwegianHeight = -14
|
||||
const UpgradeTurboHeight = -15
|
||||
const UpgradeHyperdriveHeight = -16
|
||||
const UpgradeChocolateHeight = 6360
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build calibnet
|
||||
// +build calibnet
|
||||
|
||||
package build
|
||||
@ -5,6 +6,7 @@ package build
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -14,6 +16,8 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const GenesisNetworkVersion = network.Version0
|
||||
|
||||
const BootstrappersFile = "calibnet.pi"
|
||||
const GenesisFile = "calibnet.car"
|
||||
|
||||
@ -48,6 +52,8 @@ const UpgradeTurboHeight = 390
|
||||
|
||||
const UpgradeHyperdriveHeight = 420
|
||||
|
||||
const UpgradeChocolateHeight = 312746
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
|
||||
policy.SetSupportedProofTypes(
|
||||
@ -60,6 +66,7 @@ func init() {
|
||||
Devnet = true
|
||||
|
||||
BuildType = BuildCalibnet
|
||||
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build debug
|
||||
// +build debug
|
||||
|
||||
package build
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build interopnet
|
||||
// +build interopnet
|
||||
|
||||
package build
|
||||
@ -10,14 +11,16 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
)
|
||||
|
||||
const BootstrappersFile = "interopnet.pi"
|
||||
const GenesisFile = "interopnet.car"
|
||||
|
||||
const GenesisNetworkVersion = network.Version13
|
||||
|
||||
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
|
||||
|
||||
const BreezeGasTampingDuration = 0
|
||||
@ -43,6 +46,7 @@ var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
|
||||
var UpgradeTurboHeight = abi.ChainEpoch(-15)
|
||||
|
||||
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
|
||||
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
@ -92,6 +96,7 @@ func init() {
|
||||
BuildType |= BuildInteropnet
|
||||
SetAddressNetwork(address.Testnet)
|
||||
Devnet = true
|
||||
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -1,10 +1,5 @@
|
||||
// +build !debug
|
||||
// +build !2k
|
||||
// +build !testground
|
||||
// +build !calibnet
|
||||
// +build !nerpanet
|
||||
// +build !butterflynet
|
||||
// +build !interopnet
|
||||
//go:build !debug && !2k && !testground && !calibnet && !nerpanet && !butterflynet && !interopnet
|
||||
// +build !debug,!2k,!testground,!calibnet,!nerpanet,!butterflynet,!interopnet
|
||||
|
||||
package build
|
||||
|
||||
@ -12,6 +7,8 @@ import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
@ -22,6 +19,8 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
UpgradeSmokeHeight: DrandMainnet,
|
||||
}
|
||||
|
||||
const GenesisNetworkVersion = network.Version0
|
||||
|
||||
const BootstrappersFile = "mainnet.pi"
|
||||
const GenesisFile = "mainnet.car"
|
||||
|
||||
@ -65,13 +64,16 @@ const UpgradeTurboHeight = 712320
|
||||
// 2021-06-30T22:00:00Z
|
||||
var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
|
||||
|
||||
// 2021-10-26T13:30:00Z
|
||||
var UpgradeChocolateHeight = abi.ChainEpoch(1231620)
|
||||
|
||||
func init() {
|
||||
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
|
||||
SetAddressNetwork(address.Mainnet)
|
||||
}
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" {
|
||||
UpgradeHyperdriveHeight = math.MaxInt64
|
||||
if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" {
|
||||
UpgradeChocolateHeight = math.MaxInt64
|
||||
}
|
||||
|
||||
Devnet = false
|
||||
|
@ -1,9 +1,11 @@
|
||||
//go:build nerpanet
|
||||
// +build nerpanet
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
@ -14,6 +16,8 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const GenesisNetworkVersion = network.Version0
|
||||
|
||||
const BootstrappersFile = "nerpanet.pi"
|
||||
const GenesisFile = "nerpanet.car"
|
||||
|
||||
@ -44,6 +48,8 @@ const UpgradeNorwegianHeight = 201000
|
||||
const UpgradeTurboHeight = 203000
|
||||
const UpgradeHyperdriveHeight = 379178
|
||||
|
||||
const UpgradeChocolateHeight = 999999999
|
||||
|
||||
func init() {
|
||||
// Minimum block production power is set to 4 TiB
|
||||
// Rationale is to discourage small-scale miners from trying to take over the network
|
||||
@ -68,6 +74,7 @@ func init() {
|
||||
Devnet = false
|
||||
|
||||
BuildType = BuildNerpanet
|
||||
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build !testground
|
||||
// +build !testground
|
||||
|
||||
package build
|
||||
@ -25,7 +26,17 @@ const UnixfsLinksPerLevel = 1024
|
||||
// Consensus / Network
|
||||
|
||||
const AllowableClockDriftSecs = uint64(1)
|
||||
const NewestNetworkVersion = network.Version13
|
||||
|
||||
// TODO: This is still terrible...What's the impact of updating this before mainnet actually upgrades
|
||||
/* inline-gen template
|
||||
|
||||
const NewestNetworkVersion = network.Version{{.latestNetworkVersion}}
|
||||
|
||||
/* inline-gen start */
|
||||
|
||||
const NewestNetworkVersion = network.Version14
|
||||
|
||||
/* inline-gen end */
|
||||
|
||||
// Epochs
|
||||
const ForkLengthThreshold = Finality
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build testground
|
||||
// +build testground
|
||||
|
||||
// This file makes hardcoded parameters (const) configurable as vars.
|
||||
@ -97,12 +98,15 @@ var (
|
||||
UpgradeNorwegianHeight abi.ChainEpoch = -13
|
||||
UpgradeTurboHeight abi.ChainEpoch = -14
|
||||
UpgradeHyperdriveHeight abi.ChainEpoch = -15
|
||||
UpgradeChocolateHeight abi.ChainEpoch = -16
|
||||
|
||||
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
NewestNetworkVersion = network.Version11
|
||||
GenesisNetworkVersion = network.Version0
|
||||
|
||||
NewestNetworkVersion = network.Version14
|
||||
ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
Devnet = true
|
||||
|
@ -1,4 +1,5 @@
|
||||
//+build tools
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
package build
|
||||
|
||||
|
@ -12,11 +12,10 @@ const (
|
||||
BuildDebug = 0x3
|
||||
BuildCalibnet = 0x4
|
||||
BuildInteropnet = 0x5
|
||||
BuildNerpanet = 0x6
|
||||
BuildButterflynet = 0x7
|
||||
)
|
||||
|
||||
func buildType() string {
|
||||
func BuildTypeString() string {
|
||||
switch BuildType {
|
||||
case BuildDefault:
|
||||
return ""
|
||||
@ -30,8 +29,6 @@ func buildType() string {
|
||||
return "+calibnet"
|
||||
case BuildInteropnet:
|
||||
return "+interopnet"
|
||||
case BuildNerpanet:
|
||||
return "+nerpanet"
|
||||
case BuildButterflynet:
|
||||
return "+butterflynet"
|
||||
default:
|
||||
@ -40,12 +37,12 @@ func buildType() string {
|
||||
}
|
||||
|
||||
// BuildVersion is the local build version
|
||||
const BuildVersion = "1.11.2-dev"
|
||||
const BuildVersion = "1.13.2-dev"
|
||||
|
||||
func UserVersion() string {
|
||||
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
|
||||
return BuildVersion
|
||||
}
|
||||
|
||||
return BuildVersion + buildType() + CurrentCommit
|
||||
return BuildVersion + BuildTypeString() + CurrentCommit
|
||||
}
|
||||
|
@ -21,6 +21,8 @@ import (
|
||||
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -44,6 +46,10 @@ func init() {
|
||||
builtin.RegisterActorState(builtin5.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load5(store, root)
|
||||
})
|
||||
|
||||
builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load6(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var Methods = builtin4.MethodsAccount
|
||||
@ -66,6 +72,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case builtin5.AccountActorCodeID:
|
||||
return load5(store, act.Head)
|
||||
|
||||
case builtin6.AccountActorCodeID:
|
||||
return load6(store, act.Head)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -88,6 +97,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
|
||||
case actors.Version5:
|
||||
return make5(store, addr)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store, addr)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -110,6 +122,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.AccountActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.AccountActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
|
40
chain/actors/builtin/account/v6.go
Normal file
40
chain/actors/builtin/account/v6.go
Normal file
@ -0,0 +1,40 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
account6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/account"
|
||||
)
|
||||
|
||||
var _ State = (*state6)(nil)
|
||||
|
||||
func load6(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state6{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make6(store adt.Store, addr address.Address) (State, error) {
|
||||
out := state6{store: store}
|
||||
out.State = account6.State{Address: addr}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state6 struct {
|
||||
account6.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state6) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
||||
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
@ -20,46 +20,49 @@ import (
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
|
||||
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
|
||||
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
|
||||
proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof"
|
||||
)
|
||||
|
||||
var SystemActorAddr = builtin5.SystemActorAddr
|
||||
var BurntFundsActorAddr = builtin5.BurntFundsActorAddr
|
||||
var CronActorAddr = builtin5.CronActorAddr
|
||||
var SystemActorAddr = builtin6.SystemActorAddr
|
||||
var BurntFundsActorAddr = builtin6.BurntFundsActorAddr
|
||||
var CronActorAddr = builtin6.CronActorAddr
|
||||
var SaftAddress = makeAddress("t0122")
|
||||
var ReserveAddress = makeAddress("t090")
|
||||
var RootVerifierAddress = makeAddress("t080")
|
||||
|
||||
var (
|
||||
ExpectedLeadersPerEpoch = builtin5.ExpectedLeadersPerEpoch
|
||||
ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch
|
||||
)
|
||||
|
||||
const (
|
||||
EpochDurationSeconds = builtin5.EpochDurationSeconds
|
||||
EpochsInDay = builtin5.EpochsInDay
|
||||
SecondsInDay = builtin5.SecondsInDay
|
||||
EpochDurationSeconds = builtin6.EpochDurationSeconds
|
||||
EpochsInDay = builtin6.EpochsInDay
|
||||
SecondsInDay = builtin6.SecondsInDay
|
||||
)
|
||||
|
||||
const (
|
||||
MethodSend = builtin5.MethodSend
|
||||
MethodConstructor = builtin5.MethodConstructor
|
||||
MethodSend = builtin6.MethodSend
|
||||
MethodConstructor = builtin6.MethodConstructor
|
||||
)
|
||||
|
||||
// These are all just type aliases across actor versions. In the future, that might change
|
||||
// and we might need to do something fancier.
|
||||
type SectorInfo = proof5.SectorInfo
|
||||
type PoStProof = proof5.PoStProof
|
||||
type SectorInfo = proof6.SectorInfo
|
||||
type PoStProof = proof6.PoStProof
|
||||
type FilterEstimate = smoothing0.FilterEstimate
|
||||
|
||||
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
|
||||
return miner5.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
}
|
||||
|
||||
func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
|
||||
@ -92,6 +95,12 @@ func FromV5FilterEstimate(v5 smoothing5.FilterEstimate) FilterEstimate {
|
||||
|
||||
}
|
||||
|
||||
func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate {
|
||||
|
||||
return (FilterEstimate)(v6)
|
||||
|
||||
}
|
||||
|
||||
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
|
||||
|
||||
var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
|
||||
@ -126,6 +135,9 @@ func ActorNameByCode(c cid.Cid) string {
|
||||
case builtin5.IsBuiltinActor(c):
|
||||
return builtin5.ActorNameByCode(c)
|
||||
|
||||
case builtin6.IsBuiltinActor(c):
|
||||
return builtin6.ActorNameByCode(c)
|
||||
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
@ -153,6 +165,10 @@ func IsBuiltinActor(c cid.Cid) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if builtin6.IsBuiltinActor(c) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@ -178,6 +194,10 @@ func IsAccountActor(c cid.Cid) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if c == builtin6.AccountActorCodeID {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@ -203,6 +223,10 @@ func IsStorageMinerActor(c cid.Cid) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if c == builtin6.StorageMinerActorCodeID {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@ -228,6 +252,10 @@ func IsMultisigActor(c cid.Cid) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if c == builtin6.MultisigActorCodeID {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@ -253,6 +281,10 @@ func IsPaymentChannelActor(c cid.Cid) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if c == builtin6.PaymentChannelActorCodeID {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -15,6 +15,8 @@ import (
|
||||
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
)
|
||||
|
||||
func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
@ -35,6 +37,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
case actors.Version5:
|
||||
return make5(store)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -57,14 +62,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.CronActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.CronActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
|
||||
var (
|
||||
-	Address = builtin5.CronActorAddr
-	Methods = builtin5.MethodsCron
+	Address = builtin6.CronActorAddr
+	Methods = builtin6.MethodsCron
|
||||
)
|
||||
|
||||
type State interface {
|
||||
|
35	chain/actors/builtin/cron/v6.go Normal file
@@ -0,0 +1,35 @@
package cron

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	cron6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/cron"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store) (State, error) {
	out := state6{store: store}
	out.State = *cron6.ConstructState(cron6.BuiltInEntries())
	return &out, nil
}

type state6 struct {
	cron6.State
	store adt.Store
}

func (s *state6) GetState() interface{} {
	return &s.State
}
@ -23,6 +23,8 @@ import (
|
||||
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -46,11 +48,15 @@ func init() {
|
||||
builtin.RegisterActorState(builtin5.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load5(store, root)
|
||||
})
|
||||
|
||||
builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load6(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
-	Address = builtin5.InitActorAddr
-	Methods = builtin5.MethodsInit
+	Address = builtin6.InitActorAddr
+	Methods = builtin6.MethodsInit
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -71,6 +77,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case builtin5.InitActorCodeID:
|
||||
return load5(store, act.Head)
|
||||
|
||||
case builtin6.InitActorCodeID:
|
||||
return load6(store, act.Head)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -93,6 +102,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e
|
||||
case actors.Version5:
|
||||
return make5(store, networkName)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store, networkName)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -115,6 +127,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.InitActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.InitActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
|
114	chain/actors/builtin/init/v6.go Normal file
@ -0,0 +1,114 @@
|
||||
package init
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
|
||||
init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init"
|
||||
adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state6)(nil)
|
||||
|
||||
func load6(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state6{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make6(store adt.Store, networkName string) (State, error) {
|
||||
out := state6{store: store}
|
||||
|
||||
s, err := init6.ConstructState(store, networkName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state6 struct {
|
||||
init6.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state6) ResolveAddress(address address.Address) (address.Address, bool, error) {
|
||||
return s.State.ResolveAddress(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state6) MapAddressToNewID(address address.Address) (address.Address, error) {
|
||||
return s.State.MapAddressToNewID(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state6) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
|
||||
addrs, err := adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var actorID cbg.CborInt
|
||||
return addrs.ForEach(&actorID, func(key string) error {
|
||||
addr, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(abi.ActorID(actorID), addr)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state6) NetworkName() (dtypes.NetworkName, error) {
|
||||
return dtypes.NetworkName(s.State.NetworkName), nil
|
||||
}
|
||||
|
||||
func (s *state6) SetNetworkName(name string) error {
|
||||
s.State.NetworkName = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state6) SetNextID(id abi.ActorID) error {
|
||||
s.State.NextID = id
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state6) Remove(addrs ...address.Address) (err error) {
|
||||
m, err := adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if err = m.Delete(abi.AddrKey(addr)); err != nil {
|
||||
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
|
||||
}
|
||||
}
|
||||
amr, err := m.Root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get address map root: %w", err)
|
||||
}
|
||||
s.State.AddressMap = amr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state6) SetAddressMap(mcid cid.Cid) error {
|
||||
s.State.AddressMap = mcid
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state6) AddressMap() (adt.Map, error) {
|
||||
return adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
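A minimal usage sketch of the version-agnostic init State interface implemented above; it walks the address map the same table used by ResolveAddress. The helper name and surrounding wiring are illustrative assumptions, not part of this diff.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
)

// printActorIDs walks the init actor's address -> ActorID table.
func printActorIDs(st init_.State) error {
	return st.ForEachActor(func(id abi.ActorID, addr address.Address) error {
		fmt.Printf("%s => actor id %d\n", addr, id)
		return nil
	})
}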
|
@ -1,6 +1,7 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -103,7 +104,28 @@ type DealProposals interface {
|
||||
}
|
||||
|
||||
type PublishStorageDealsParams = market0.PublishStorageDealsParams
|
||||
-type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
|
||||
|
||||
type PublishStorageDealsReturn interface {
|
||||
DealIDs() ([]abi.DealID, error)
|
||||
// Note that this index is based on the batch of deals that were published, NOT the DealID
|
||||
IsDealValid(index uint64) (bool, error)
|
||||
}
|
||||
|
||||
func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStorageDealsReturn, error) {
|
||||
av, err := actors.VersionForNetwork(nv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch av {
|
||||
{{range .versions}}
|
||||
case actors.Version{{.}}:
|
||||
return decodePublishStorageDealsReturn{{.}}(b)
|
||||
{{end}}
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
|
||||
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
|
||||
type WithdrawBalanceParams = market0.WithdrawBalanceParams
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -22,6 +23,8 @@ import (
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -49,11 +52,15 @@ func init() {
|
||||
builtin.RegisterActorState(builtin5.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load5(store, root)
|
||||
})
|
||||
|
||||
builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load6(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
-	Address = builtin5.StorageMarketActorAddr
-	Methods = builtin5.MethodsMarket
+	Address = builtin6.StorageMarketActorAddr
+	Methods = builtin6.MethodsMarket
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -74,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case builtin5.StorageMarketActorCodeID:
|
||||
return load5(store, act.Head)
|
||||
|
||||
case builtin6.StorageMarketActorCodeID:
|
||||
return load6(store, act.Head)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -96,6 +106,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
case actors.Version5:
|
||||
return make5(store)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -118,6 +131,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.StorageMarketActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.StorageMarketActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
@ -162,7 +178,43 @@ type DealProposals interface {
|
||||
}
|
||||
|
||||
type PublishStorageDealsParams = market0.PublishStorageDealsParams
|
||||
-type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
|
||||
|
||||
type PublishStorageDealsReturn interface {
|
||||
DealIDs() ([]abi.DealID, error)
|
||||
// Note that this index is based on the batch of deals that were published, NOT the DealID
|
||||
IsDealValid(index uint64) (bool, error)
|
||||
}
|
||||
|
||||
func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStorageDealsReturn, error) {
|
||||
av, err := actors.VersionForNetwork(nv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch av {
|
||||
|
||||
case actors.Version0:
|
||||
return decodePublishStorageDealsReturn0(b)
|
||||
|
||||
case actors.Version2:
|
||||
return decodePublishStorageDealsReturn2(b)
|
||||
|
||||
case actors.Version3:
|
||||
return decodePublishStorageDealsReturn3(b)
|
||||
|
||||
case actors.Version4:
|
||||
return decodePublishStorageDealsReturn4(b)
|
||||
|
||||
case actors.Version5:
|
||||
return decodePublishStorageDealsReturn5(b)
|
||||
|
||||
case actors.Version6:
|
||||
return decodePublishStorageDealsReturn6(b)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
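A hedged usage sketch for the decoder above: given the raw return bytes of a PublishStorageDeals message and the network version at that epoch, report which positions in the published batch were accepted. The function name and the batchSize parameter are assumptions for illustration; the index semantics follow the interface comment (batch position, not DealID).

package example

import (
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

// acceptedIndices returns the batch positions whose deals were accepted.
func acceptedIndices(ret []byte, nv network.Version, batchSize uint64) ([]uint64, error) {
	decoded, err := market.DecodePublishStorageDealsReturn(ret, nv)
	if err != nil {
		return nil, err
	}
	accepted := make([]uint64, 0, batchSize)
	for i := uint64(0); i < batchSize; i++ {
		// Index into the published batch, NOT a DealID (see the interface comment above).
		ok, err := decoded.IsDealValid(i)
		if err != nil {
			return nil, err
		}
		if ok {
			accepted = append(accepted, i)
		}
	}
	return accepted, nil
}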
|
||||
|
||||
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
|
||||
type WithdrawBalanceParams = market0.WithdrawBalanceParams
|
||||
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -236,3 +237,31 @@ func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal {
|
||||
func (s *state{{.v}}) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn{{.v}})(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn{{.v}}(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market{{.v}}.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn{{.v}}{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn{{.v}} struct {
|
||||
market{{.v}}.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn{{.v}}) IsDealValid(index uint64) (bool, error) {
|
||||
{{if (ge .v 6)}}
|
||||
return r.ValidDeals.IsSet(index)
|
||||
{{else}}
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
{{end}}
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn{{.v}}) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -229,3 +230,29 @@ func fromV0DealProposal(v0 market0.DealProposal) DealProposal {
|
||||
func (s *state0) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn0)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn0(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market0.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn0{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn0 struct {
|
||||
market0.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn0) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn0) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -229,3 +230,29 @@ func fromV2DealProposal(v2 market2.DealProposal) DealProposal {
|
||||
func (s *state2) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn2)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn2(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market2.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn2{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn2 struct {
|
||||
market2.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn2) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn2) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -224,3 +225,29 @@ func fromV3DealProposal(v3 market3.DealProposal) DealProposal {
|
||||
func (s *state3) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn3)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn3(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market3.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn3{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn3 struct {
|
||||
market3.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn3) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn3) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -224,3 +225,29 @@ func fromV4DealProposal(v4 market4.DealProposal) DealProposal {
|
||||
func (s *state4) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn4)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn4(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market4.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn4{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn4 struct {
|
||||
market4.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn4) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn4) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -224,3 +225,29 @@ func fromV5DealProposal(v5 market5.DealProposal) DealProposal {
|
||||
func (s *state5) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn5)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn5(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market5.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn5{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn5 struct {
|
||||
market5.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn5) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn5) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
252	chain/actors/builtin/market/v6.go Normal file
@ -0,0 +1,252 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market"
|
||||
adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state6)(nil)
|
||||
|
||||
func load6(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state6{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make6(store adt.Store) (State, error) {
|
||||
out := state6{store: store}
|
||||
|
||||
s, err := market6.ConstructState(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state6 struct {
|
||||
market6.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state6) TotalLocked() (abi.TokenAmount, error) {
|
||||
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
|
||||
fml = types.BigAdd(fml, s.TotalClientStorageFee)
|
||||
return fml, nil
|
||||
}
|
||||
|
||||
func (s *state6) BalancesChanged(otherState State) (bool, error) {
|
||||
otherState6, ok := otherState.(*state6)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.EscrowTable.Equals(otherState6.State.EscrowTable) || !s.State.LockedTable.Equals(otherState6.State.LockedTable), nil
|
||||
}
|
||||
|
||||
func (s *state6) StatesChanged(otherState State) (bool, error) {
|
||||
otherState6, ok := otherState.(*state6)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the deal states have changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.States.Equals(otherState6.State.States), nil
|
||||
}
|
||||
|
||||
func (s *state6) States() (DealStates, error) {
|
||||
stateArray, err := adt6.AsArray(s.store, s.State.States, market6.StatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealStates6{stateArray}, nil
|
||||
}
|
||||
|
||||
func (s *state6) ProposalsChanged(otherState State) (bool, error) {
|
||||
otherState6, ok := otherState.(*state6)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the proposals have changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Proposals.Equals(otherState6.State.Proposals), nil
|
||||
}
|
||||
|
||||
func (s *state6) Proposals() (DealProposals, error) {
|
||||
proposalArray, err := adt6.AsArray(s.store, s.State.Proposals, market6.ProposalsAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealProposals6{proposalArray}, nil
|
||||
}
|
||||
|
||||
func (s *state6) EscrowTable() (BalanceTable, error) {
|
||||
bt, err := adt6.AsBalanceTable(s.store, s.State.EscrowTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable6{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state6) LockedTable() (BalanceTable, error) {
|
||||
bt, err := adt6.AsBalanceTable(s.store, s.State.LockedTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable6{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state6) VerifyDealsForActivation(
|
||||
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
|
||||
) (weight, verifiedWeight abi.DealWeight, err error) {
|
||||
w, vw, _, err := market6.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
|
||||
return w, vw, err
|
||||
}
|
||||
|
||||
func (s *state6) NextID() (abi.DealID, error) {
|
||||
return s.State.NextID, nil
|
||||
}
|
||||
|
||||
type balanceTable6 struct {
|
||||
*adt6.BalanceTable
|
||||
}
|
||||
|
||||
func (bt *balanceTable6) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
|
||||
asMap := (*adt6.Map)(bt.BalanceTable)
|
||||
var ta abi.TokenAmount
|
||||
return asMap.ForEach(&ta, func(key string) error {
|
||||
a, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, ta)
|
||||
})
|
||||
}
|
||||
|
||||
type dealStates6 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) {
|
||||
var deal6 market6.DealState
|
||||
found, err := s.Array.Get(uint64(dealID), &deal6)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
deal := fromV6DealState(deal6)
|
||||
return &deal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
|
||||
var ds6 market6.DealState
|
||||
return s.Array.ForEach(&ds6, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV6DealState(ds6))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) {
|
||||
var ds6 market6.DealState
|
||||
if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds := fromV6DealState(ds6)
|
||||
return &ds, nil
|
||||
}
|
||||
|
||||
func (s *dealStates6) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV6DealState(v6 market6.DealState) DealState {
|
||||
return (DealState)(v6)
|
||||
}
|
||||
|
||||
type dealProposals6 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealProposals6) Get(dealID abi.DealID) (*DealProposal, bool, error) {
|
||||
var proposal6 market6.DealProposal
|
||||
found, err := s.Array.Get(uint64(dealID), &proposal6)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
proposal := fromV6DealProposal(proposal6)
|
||||
return &proposal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals6) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
|
||||
var dp6 market6.DealProposal
|
||||
return s.Array.ForEach(&dp6, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV6DealProposal(dp6))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealProposals6) decode(val *cbg.Deferred) (*DealProposal, error) {
|
||||
var dp6 market6.DealProposal
|
||||
if err := dp6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dp := fromV6DealProposal(dp6)
|
||||
return &dp, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals6) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV6DealProposal(v6 market6.DealProposal) DealProposal {
|
||||
return (DealProposal)(v6)
|
||||
}
|
||||
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn6)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn6(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market6.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn6{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn6 struct {
|
||||
market6.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn6) IsDealValid(index uint64) (bool, error) {
|
||||
|
||||
return r.ValidDeals.IsSet(index)
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn6) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
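A hedged sketch of consuming the state6 implementation above through the version-agnostic market wrappers: load the actor state and walk the on-chain deal proposals. The helper name is an assumption; store and act are expected to come from a state-tree lookup.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
)

// listDeals prints every on-chain deal proposal held by the market actor.
func listDeals(store adt.Store, act *types.Actor) error {
	st, err := market.Load(store, act)
	if err != nil {
		return err
	}
	props, err := st.Proposals()
	if err != nil {
		return err
	}
	return props.ForEach(func(id abi.DealID, dp market.DealProposal) error {
		fmt.Println(id, dp.Client, dp.Provider, dp.PieceCID)
		return nil
	})
}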
|
@ -157,6 +157,12 @@ type Partition interface {
|
||||
|
||||
// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power.
|
||||
ActiveSectors() (bitfield.BitField, error)
|
||||
|
||||
// Unproven sectors in this partition. This bitfield will be cleared on
|
||||
// a successful window post (or at the end of the partition's next
|
||||
// deadline). At that time, any still unproven sectors will be added to
|
||||
// the faulty sector bitfield.
|
||||
UnprovenSectors() (bitfield.BitField, error)
|
||||
}
|
||||
|
||||
type SectorOnChainInfo struct {
|
||||
|
@ -33,6 +33,8 @@ import (
|
||||
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -57,9 +59,13 @@ func init() {
|
||||
return load5(store, root)
|
||||
})
|
||||
|
||||
builtin.RegisterActorState(builtin6.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load6(store, root)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
-var Methods = builtin5.MethodsMiner
+var Methods = builtin6.MethodsMiner
|
||||
|
||||
// Unchanged between v0, v2, v3, v4, and v5 actors
|
||||
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
|
||||
@ -93,6 +99,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case builtin5.StorageMinerActorCodeID:
|
||||
return load5(store, act.Head)
|
||||
|
||||
case builtin6.StorageMinerActorCodeID:
|
||||
return load6(store, act.Head)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -115,6 +124,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
case actors.Version5:
|
||||
return make5(store)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -137,6 +149,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.StorageMinerActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.StorageMinerActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
@ -216,6 +231,12 @@ type Partition interface {
|
||||
|
||||
// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power.
|
||||
ActiveSectors() (bitfield.BitField, error)
|
||||
|
||||
// Unproven sectors in this partition. This bitfield will be cleared on
|
||||
// a successful window post (or at the end of the partition's next
|
||||
// deadline). At that time, any still unproven sectors will be added to
|
||||
// the faulty sector bitfield.
|
||||
UnprovenSectors() (bitfield.BitField, error)
|
||||
}
|
||||
|
||||
type SectorOnChainInfo struct {
|
||||
|
@ -549,6 +549,10 @@ func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return {{if (ge .v 2)}}p.Partition.Unproven{{else}}bitfield.New(){{end}}, nil
|
||||
}
|
||||
|
||||
func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
|
||||
{{if (ge .v 2)}}
|
||||
return SectorOnChainInfo{
|
||||
|
@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.
|
||||
|
||||
return 0, xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
|
||||
// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating
|
||||
// new miner actors and new sectors
|
||||
func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) {
|
||||
switch ssize {
|
||||
case 2 << 10:
|
||||
return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil
|
||||
case 8 << 20:
|
||||
return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil
|
||||
case 512 << 20:
|
||||
return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil
|
||||
case 32 << 30:
|
||||
return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil
|
||||
case 64 << 30:
|
||||
return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil
|
||||
default:
|
||||
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
|
||||
}
|
||||
}
|
||||
|
@ -500,6 +500,10 @@ func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition0) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return bitfield.New(), nil
|
||||
}
|
||||
|
||||
func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return (SectorOnChainInfo)(v0)
|
||||
|
@ -530,6 +530,10 @@ func (p *partition2) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition2) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return SectorOnChainInfo{
|
||||
|
@ -531,6 +531,10 @@ func (p *partition3) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition3) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return SectorOnChainInfo{
|
||||
|
@ -531,6 +531,10 @@ func (p *partition4) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition4) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return SectorOnChainInfo{
|
||||
|
@ -531,6 +531,10 @@ func (p *partition5) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition5) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return SectorOnChainInfo{
|
||||
|
570	chain/actors/builtin/miner/v6.go Normal file
@ -0,0 +1,570 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
rle "github.com/filecoin-project/go-bitfield/rle"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
|
||||
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
|
||||
adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state6)(nil)
|
||||
|
||||
func load6(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state6{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make6(store adt.Store) (State, error) {
|
||||
out := state6{store: store}
|
||||
out.State = miner6.State{}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state6 struct {
|
||||
miner6.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type deadline6 struct {
|
||||
miner6.Deadline
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type partition6 struct {
|
||||
miner6.Partition
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = xerrors.Errorf("failed to get available balance: %w", r)
|
||||
available = abi.NewTokenAmount(0)
|
||||
}
|
||||
}()
|
||||
// this panics if the miner doesn't have enough funds to cover their locked pledge
|
||||
available, err = s.GetAvailableBalance(bal)
|
||||
return available, err
|
||||
}
|
||||
|
||||
func (s *state6) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.CheckVestedFunds(s.store, epoch)
|
||||
}
|
||||
|
||||
func (s *state6) LockedFunds() (LockedFunds, error) {
|
||||
return LockedFunds{
|
||||
VestingFunds: s.State.LockedFunds,
|
||||
InitialPledgeRequirement: s.State.InitialPledge,
|
||||
PreCommitDeposits: s.State.PreCommitDeposits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state6) FeeDebt() (abi.TokenAmount, error) {
|
||||
return s.State.FeeDebt, nil
|
||||
}
|
||||
|
||||
func (s *state6) InitialPledge() (abi.TokenAmount, error) {
|
||||
return s.State.InitialPledge, nil
|
||||
}
|
||||
|
||||
func (s *state6) PreCommitDeposits() (abi.TokenAmount, error) {
|
||||
return s.State.PreCommitDeposits, nil
|
||||
}
|
||||
|
||||
func (s *state6) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV6SectorOnChainInfo(*info)
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state6) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
|
||||
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SectorLocation{
|
||||
Deadline: dlIdx,
|
||||
Partition: partIdx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state6) NumLiveSectors() (uint64, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var total uint64
|
||||
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error {
|
||||
total += dl.LiveSectors
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// GetSectorExpiration returns the effective expiration of the given sector.
|
||||
//
|
||||
// If the sector does not expire early, the Early expiration field is 0.
|
||||
func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: this can be optimized significantly.
|
||||
// 1. If the sector is non-faulty, it will either expire on-time (can be
|
||||
// learned from the sector info), or in the next quantized expiration
|
||||
// epoch (i.e., the first element in the partition's expiration queue).
|
||||
// 2. If it's faulty, it will expire early within the first 14 entries
|
||||
// of the expiration queue.
|
||||
stopErr := errors.New("stop")
|
||||
out := SectorExpiration{}
|
||||
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error {
|
||||
partitions, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
quant := s.State.QuantSpecForDeadline(dlIdx)
|
||||
var part miner6.Partition
|
||||
return partitions.ForEach(&part, func(partIdx int64) error {
|
||||
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if !found {
|
||||
return nil
|
||||
}
|
||||
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if found {
|
||||
// already terminated
|
||||
return stopErr
|
||||
}
|
||||
|
||||
q, err := miner6.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner6.PartitionExpirationAmtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var exp miner6.ExpirationSet
|
||||
return q.ForEach(&exp, func(epoch int64) error {
|
||||
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if early {
|
||||
out.Early = abi.ChainEpoch(epoch)
|
||||
return nil
|
||||
}
|
||||
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if onTime {
|
||||
out.OnTime = abi.ChainEpoch(epoch)
|
||||
return stopErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
})
|
||||
if err == stopErr {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if out.Early == 0 && out.OnTime == 0 {
|
||||
return nil, xerrors.Errorf("failed to find sector %d", num)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *state6) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV6SectorPreCommitOnChainInfo(*info)
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state6) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
|
||||
precommitted, err := adt6.AsMap(s.store, s.State.PreCommittedSectors, builtin6.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var info miner6.SectorPreCommitOnChainInfo
|
||||
if err := precommitted.ForEach(&info, func(_ string) error {
|
||||
return cb(fromV6SectorPreCommitOnChainInfo(info))
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state6) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
|
||||
sectors, err := miner6.LoadSectors(s.store, s.State.Sectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If no sector numbers are specified, load all.
|
||||
if snos == nil {
|
||||
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
|
||||
var info6 miner6.SectorOnChainInfo
|
||||
if err := sectors.ForEach(&info6, func(_ int64) error {
|
||||
info := fromV6SectorOnChainInfo(info6)
|
||||
infos = append(infos, &info)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
// Otherwise, load selected.
|
||||
infos6, err := sectors.Load(*snos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos := make([]*SectorOnChainInfo, len(infos6))
|
||||
for i, info6 := range infos6 {
|
||||
info := fromV6SectorOnChainInfo(*info6)
|
||||
infos[i] = &info
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (s *state6) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
|
||||
return allocatedSectors, err
|
||||
}
|
||||
|
||||
func (s *state6) IsAllocated(num abi.SectorNumber) (bool, error) {
|
||||
allocatedSectors, err := s.loadAllocatedSectorNumbers()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return allocatedSectors.IsSet(uint64(num))
|
||||
}
|
||||
|
||||
func (s *state6) GetProvingPeriodStart() (abi.ChainEpoch, error) {
|
||||
return s.State.ProvingPeriodStart, nil
|
||||
}
|
||||
|
||||
func (s *state6) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
|
||||
allocatedSectors, err := s.loadAllocatedSectorNumbers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allocatedRuns, err := allocatedSectors.RunIterator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unallocatedRuns, err := rle.Subtract(
|
||||
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
|
||||
allocatedRuns,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iter, err := rle.BitsFromRuns(unallocatedRuns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sectors := make([]abi.SectorNumber, 0, count)
|
||||
for iter.HasNext() && len(sectors) < count {
|
||||
nextNo, err := iter.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sectors = append(sectors, abi.SectorNumber(nextNo))
|
||||
}
|
||||
|
||||
return sectors, nil
|
||||
}
|
||||
|
||||
func (s *state6) GetAllocatedSectors() (*bitfield.BitField, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &allocatedSectors, nil
|
||||
}
|
||||
|
||||
func (s *state6) LoadDeadline(idx uint64) (Deadline, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dl, err := dls.LoadDeadline(s.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &deadline6{*dl, s.store}, nil
|
||||
}
|
||||
|
||||
func (s *state6) ForEachDeadline(cb func(uint64, Deadline) error) error {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dls.ForEach(s.store, func(i uint64, dl *miner6.Deadline) error {
|
||||
return cb(i, &deadline6{*dl, s.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state6) NumDeadlines() (uint64, error) {
|
||||
return miner6.WPoStPeriodDeadlines, nil
|
||||
}
|
||||
|
||||
func (s *state6) DeadlinesChanged(other State) (bool, error) {
|
||||
other6, ok := other.(*state6)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !s.State.Deadlines.Equals(other6.Deadlines), nil
|
||||
}
|
||||
|
||||
func (s *state6) MinerInfoChanged(other State) (bool, error) {
|
||||
	other6, ok := other.(*state6)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.Info.Equals(other6.State.Info), nil
|
||||
}
|
||||
|
||||
func (s *state6) Info() (MinerInfo, error) {
|
||||
info, err := s.State.GetInfo(s.store)
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
var pid *peer.ID
|
||||
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
ControlAddresses: info.ControlAddresses,
|
||||
|
||||
NewWorker: address.Undef,
|
||||
WorkerChangeEpoch: -1,
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
WindowPoStProofType: info.WindowPoStProofType,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
}
|
||||
|
||||
if info.PendingWorkerKey != nil {
|
||||
mi.NewWorker = info.PendingWorkerKey.NewWorker
|
||||
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
|
||||
}
|
||||
|
||||
return mi, nil
|
||||
}
|
||||
|
||||
func (s *state6) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
|
||||
return s.State.RecordedDeadlineInfo(epoch), nil
|
||||
}
|
||||
|
||||
func (s *state6) DeadlineCronActive() (bool, error) {
|
||||
return s.State.DeadlineCronActive, nil
|
||||
}
|
||||
|
||||
func (s *state6) sectors() (adt.Array, error) {
|
||||
return adt6.AsArray(s.store, s.Sectors, miner6.SectorsAmtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state6) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
|
||||
var si miner6.SectorOnChainInfo
|
||||
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV6SectorOnChainInfo(si), nil
|
||||
}
|
||||
|
||||
func (s *state6) precommits() (adt.Map, error) {
|
||||
return adt6.AsMap(s.store, s.PreCommittedSectors, builtin6.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state6) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
|
||||
var sp miner6.SectorPreCommitOnChainInfo
|
||||
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorPreCommitOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV6SectorPreCommitOnChainInfo(sp), nil
|
||||
}
|
||||
|
||||
func (s *state6) EraseAllUnproven() error {
|
||||
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dls.ForEach(s.store, func(dindx uint64, dl *miner6.Deadline) error {
|
||||
ps, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var part miner6.Partition
|
||||
err = ps.ForEach(&part, func(pindx int64) error {
|
||||
_ = part.ActivateUnproven()
|
||||
err = ps.Set(uint64(pindx), &part)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dl.Partitions, err = ps.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dls.UpdateDeadline(s.store, dindx, dl)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.State.SaveDeadlines(s.store, dls)
|
||||
|
||||
}
|
||||
|
||||
func (d *deadline6) LoadPartition(idx uint64) (Partition, error) {
|
||||
p, err := d.Deadline.LoadPartition(d.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &partition6{*p, d.store}, nil
|
||||
}
|
||||
|
||||
func (d *deadline6) ForEachPartition(cb func(uint64, Partition) error) error {
|
||||
ps, err := d.Deadline.PartitionsArray(d.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var part miner6.Partition
|
||||
return ps.ForEach(&part, func(i int64) error {
|
||||
return cb(uint64(i), &partition6{part, d.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (d *deadline6) PartitionsChanged(other Deadline) (bool, error) {
|
||||
other6, ok := other.(*deadline6)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !d.Deadline.Partitions.Equals(other6.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline6) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PartitionsPoSted, nil
|
||||
}
|
||||
|
||||
func (d *deadline6) DisputableProofCount() (uint64, error) {
|
||||
|
||||
ops, err := d.OptimisticProofsSnapshotArray(d.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return ops.Length(), nil
|
||||
|
||||
}
|
||||
|
||||
func (p *partition6) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
||||
func (p *partition6) FaultySectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Faults, nil
|
||||
}
|
||||
|
||||
func (p *partition6) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition6) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo {
|
||||
|
||||
return SectorOnChainInfo{
|
||||
SectorNumber: v6.SectorNumber,
|
||||
SealProof: v6.SealProof,
|
||||
SealedCID: v6.SealedCID,
|
||||
DealIDs: v6.DealIDs,
|
||||
Activation: v6.Activation,
|
||||
Expiration: v6.Expiration,
|
||||
DealWeight: v6.DealWeight,
|
||||
VerifiedDealWeight: v6.VerifiedDealWeight,
|
||||
InitialPledge: v6.InitialPledge,
|
||||
ExpectedDayReward: v6.ExpectedDayReward,
|
||||
ExpectedStoragePledge: v6.ExpectedStoragePledge,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
|
||||
|
||||
return SectorPreCommitOnChainInfo{
|
||||
Info: (SectorPreCommitInfo)(v6.Info),
|
||||
PreCommitDeposit: v6.PreCommitDeposit,
|
||||
PreCommitEpoch: v6.PreCommitEpoch,
|
||||
DealWeight: v6.DealWeight,
|
||||
VerifiedDealWeight: v6.VerifiedDealWeight,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
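A hedged sketch of reading the state6 implementation above through the version-agnostic miner wrappers: load a miner actor from the state tree and print a few MinerInfo fields. The helper name is illustrative; store and act are assumed inputs.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
)

// printMinerInfo dispatches on the actor's code CID (v6 here) and prints info.
func printMinerInfo(store adt.Store, act *types.Actor) error {
	st, err := miner.Load(store, act)
	if err != nil {
		return err
	}
	info, err := st.Info()
	if err != nil {
		return err
	}
	fmt.Println("sector size:", info.SectorSize, "window PoSt proof:", info.WindowPoStProofType)
	return nil
}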
|
71	chain/actors/builtin/multisig/message6.go Normal file
@ -0,0 +1,71 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init"
|
||||
multisig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message6 struct{ message0 }
|
||||
|
||||
func (m message6) Create(
|
||||
signers []address.Address, threshold uint64,
|
||||
unlockStart, unlockDuration abi.ChainEpoch,
|
||||
initialAmount abi.TokenAmount,
|
||||
) (*types.Message, error) {
|
||||
|
||||
lenAddrs := uint64(len(signers))
|
||||
|
||||
if lenAddrs < threshold {
|
||||
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
|
||||
}
|
||||
|
||||
if threshold == 0 {
|
||||
threshold = lenAddrs
|
||||
}
|
||||
|
||||
if m.from == address.Undef {
|
||||
return nil, xerrors.Errorf("must provide source address")
|
||||
}
|
||||
|
||||
// Set up constructor parameters for multisig
|
||||
msigParams := &multisig6.ConstructorParams{
|
||||
Signers: signers,
|
||||
NumApprovalsThreshold: threshold,
|
||||
UnlockDuration: unlockDuration,
|
||||
StartEpoch: unlockStart,
|
||||
}
|
||||
|
||||
enc, actErr := actors.SerializeParams(msigParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
// new actors are created by invoking 'exec' on the init actor with the constructor params
|
||||
execParams := &init6.ExecParams{
|
||||
CodeCID: builtin6.MultisigActorCodeID,
|
||||
ConstructorParams: enc,
|
||||
}
|
||||
|
||||
enc, actErr = actors.SerializeParams(execParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Method: builtin6.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
Value: initialAmount,
|
||||
}, nil
|
||||
}
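A hedged usage sketch for the builder above: construct (but do not push) the init-actor exec message that creates a 2-of-3 multisig with no vesting and no initial funds. The address values and helper name are assumptions.

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
	"github.com/filecoin-project/lotus/chain/types"
)

// createMsigMessage builds a message that creates a 2-of-3 multisig via the
// init actor, using the version-6 message builder.
func createMsigMessage(creator, a1, a2, a3 address.Address) (*types.Message, error) {
	return multisig.Message(actors.Version6, creator).Create(
		[]address.Address{a1, a2, a3}, // signers
		2,                             // approval threshold (2-of-3)
		0, 0,                          // unlock start epoch and vesting duration (none)
		big.Zero(),                    // no initial balance transferred
	)
}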
|
@ -13,7 +13,7 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
-	msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
+	msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig"
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
|
||||
@ -25,6 +25,8 @@ import (
|
||||
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -52,6 +54,10 @@ func init() {
|
||||
builtin.RegisterActorState(builtin5.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load5(store, root)
|
||||
})
|
||||
|
||||
builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load6(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -72,6 +78,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case builtin5.MultisigActorCodeID:
|
||||
return load5(store, act.Head)
|
||||
|
||||
case builtin6.MultisigActorCodeID:
|
||||
return load6(store, act.Head)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -94,6 +103,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th
|
||||
case actors.Version5:
|
||||
return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
|
||||
|
||||
case actors.Version6:
|
||||
return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -116,6 +128,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
|
||||
case actors.Version5:
|
||||
return builtin5.MultisigActorCodeID, nil
|
||||
|
||||
case actors.Version6:
|
||||
return builtin6.MultisigActorCodeID, nil
|
||||
|
||||
}
|
||||
|
||||
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
|
||||
@ -141,7 +156,7 @@ type State interface {
|
||||
|
||||
type Transaction = msig0.Transaction
|
||||
|
||||
var Methods = builtin5.MethodsMultisig
|
||||
var Methods = builtin6.MethodsMultisig
|
||||
|
||||
func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
switch version {
|
||||
@ -160,6 +175,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
|
||||
case actors.Version5:
|
||||
return message5{message0{from}}
|
||||
|
||||
case actors.Version6:
|
||||
return message6{message0{from}}
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported actors version: %d", version))
|
||||
}
|
||||
@ -183,13 +201,13 @@ type MessageBuilder interface {
|
||||
}
|
||||
|
||||
// this type is the same between v0 and v2
|
||||
type ProposalHashData = msig5.ProposalHashData
|
||||
type ProposeReturn = msig5.ProposeReturn
|
||||
type ProposeParams = msig5.ProposeParams
|
||||
type ApproveReturn = msig5.ApproveReturn
|
||||
type ProposalHashData = msig6.ProposalHashData
|
||||
type ProposeReturn = msig6.ProposeReturn
|
||||
type ProposeParams = msig6.ProposeParams
|
||||
type ApproveReturn = msig6.ApproveReturn
|
||||
|
||||
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
|
||||
params := msig5.TxnIDParams{ID: msig5.TxnID(id)}
|
||||
params := msig6.TxnIDParams{ID: msig6.TxnID(id)}
|
||||
if data != nil {
|
||||
if data.Requester.Protocol() != address.ID {
|
||||
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
|
||||
|
119	chain/actors/builtin/multisig/v6.go	Normal file
@ -0,0 +1,119 @@
package multisig

import (
	"bytes"
	"encoding/binary"

	adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
	out := state6{store: store}
	out.State = msig6.State{}
	out.State.Signers = signers
	out.State.NumApprovalsThreshold = threshold
	out.State.StartEpoch = startEpoch
	out.State.UnlockDuration = unlockDuration
	out.State.InitialBalance = initialBalance

	em, err := adt6.StoreEmptyMap(store, builtin6.DefaultHamtBitwidth)
	if err != nil {
		return nil, err
	}

	out.State.PendingTxns = em

	return &out, nil
}

type state6 struct {
	msig6.State
	store adt.Store
}

func (s *state6) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
	return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
}

func (s *state6) StartEpoch() (abi.ChainEpoch, error) {
	return s.State.StartEpoch, nil
}

func (s *state6) UnlockDuration() (abi.ChainEpoch, error) {
	return s.State.UnlockDuration, nil
}

func (s *state6) InitialBalance() (abi.TokenAmount, error) {
	return s.State.InitialBalance, nil
}

func (s *state6) Threshold() (uint64, error) {
	return s.State.NumApprovalsThreshold, nil
}

func (s *state6) Signers() ([]address.Address, error) {
	return s.State.Signers, nil
}

func (s *state6) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
	arr, err := adt6.AsMap(s.store, s.State.PendingTxns, builtin6.DefaultHamtBitwidth)
	if err != nil {
		return err
	}
	var out msig6.Transaction
	return arr.ForEach(&out, func(key string) error {
		txid, n := binary.Varint([]byte(key))
		if n <= 0 {
			return xerrors.Errorf("invalid pending transaction key: %v", key)
		}
		return cb(txid, (Transaction)(out)) //nolint:unconvert
	})
}

func (s *state6) PendingTxnChanged(other State) (bool, error) {
	other6, ok := other.(*state6)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.PendingTxns.Equals(other6.PendingTxns), nil
}

func (s *state6) transactions() (adt.Map, error) {
	return adt6.AsMap(s.store, s.PendingTxns, builtin6.DefaultHamtBitwidth)
}

func (s *state6) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
	var tx msig6.Transaction
	if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return Transaction{}, err
	}
	return tx, nil
}

func (s *state6) GetState() interface{} {
	return &s.State
}
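A hedged sketch (not in the diff) of driving the accessor just added through the package's abstract State interface; the helper name is hypothetical, `store` and `act` are assumed to come from the caller (e.g. a state-manager lookup), and the lotus adt, types, multisig packages plus fmt are assumed imported.

// listPendingTxns loads whatever multisig actor version act points at (v6
// included, via the load6 registered in this diff) and prints its pending proposals.
func listPendingTxns(store adt.Store, act *types.Actor) error {
	st, err := multisig.Load(store, act)
	if err != nil {
		return err
	}
	return st.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
		fmt.Printf("proposal %d: send %s to %s\n", id, txn.Value, txn.To)
		return nil
	})
}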
74	chain/actors/builtin/paych/message6.go	Normal file
@ -0,0 +1,74 @@
package paych

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
	init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init"
	paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych"

	"github.com/filecoin-project/lotus/chain/actors"
	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

type message6 struct{ from address.Address }

func (m message6) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
	params, aerr := actors.SerializeParams(&paych6.ConstructorParams{From: m.from, To: to})
	if aerr != nil {
		return nil, aerr
	}
	enc, aerr := actors.SerializeParams(&init6.ExecParams{
		CodeCID:           builtin6.PaymentChannelActorCodeID,
		ConstructorParams: params,
	})
	if aerr != nil {
		return nil, aerr
	}

	return &types.Message{
		To:     init_.Address,
		From:   m.from,
		Value:  initialAmount,
		Method: builtin6.MethodsInit.Exec,
		Params: enc,
	}, nil
}

func (m message6) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
	params, aerr := actors.SerializeParams(&paych6.UpdateChannelStateParams{
		Sv:     *sv,
		Secret: secret,
	})
	if aerr != nil {
		return nil, aerr
	}

	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin6.MethodsPaych.UpdateChannelState,
		Params: params,
	}, nil
}

func (m message6) Settle(paych address.Address) (*types.Message, error) {
	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin6.MethodsPaych.Settle,
	}, nil
}

func (m message6) Collect(paych address.Address) (*types.Message, error) {
	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin6.MethodsPaych.Collect,
	}, nil
}
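Not part of the diff: a short, hedged sketch of the counterpart usage for the payment channel builder above; the helper name, addresses and amount are hypothetical, and the actors and paych packages from this repo are assumed imported.

// buildPaychCreate constructs the init-actor exec message that deploys a new
// payment channel funded with amt, using the v6 builder added above.
func buildPaychCreate(from, to address.Address, amt abi.TokenAmount) (*types.Message, error) {
	return paych.Message(actors.Version6, from).Create(to, amt)
}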
@ -25,6 +25,8 @@ import (

	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -52,6 +54,10 @@ func init() {
	builtin.RegisterActorState(builtin5.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load5(store, root)
	})

	builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load6(store, root)
	})
}

// Load returns an abstract copy of payment channel state, irregardless of actor version
@ -73,6 +79,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case builtin5.PaymentChannelActorCodeID:
		return load5(store, act.Head)

	case builtin6.PaymentChannelActorCodeID:
		return load6(store, act.Head)

	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -95,6 +104,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
	case actors.Version5:
		return make5(store)

	case actors.Version6:
		return make6(store)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -117,6 +129,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	case actors.Version5:
		return builtin5.PaymentChannelActorCodeID, nil

	case actors.Version6:
		return builtin6.PaymentChannelActorCodeID, nil

	}

	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -170,7 +185,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
	return &sv, nil
}

var Methods = builtin5.MethodsPaych
var Methods = builtin6.MethodsPaych

func Message(version actors.Version, from address.Address) MessageBuilder {
	switch version {
@ -190,6 +205,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
	case actors.Version5:
		return message5{from}

	case actors.Version6:
		return message6{from}

	default:
		panic(fmt.Sprintf("unsupported actors version: %d", version))
	}
114	chain/actors/builtin/paych/v6.go	Normal file
@ -0,0 +1,114 @@
package paych

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych"
	adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store) (State, error) {
	out := state6{store: store}
	out.State = paych6.State{}
	return &out, nil
}

type state6 struct {
	paych6.State
	store adt.Store
	lsAmt *adt6.Array
}

// Channel owner, who has funded the actor
func (s *state6) From() (address.Address, error) {
	return s.State.From, nil
}

// Recipient of payouts from channel
func (s *state6) To() (address.Address, error) {
	return s.State.To, nil
}

// Height at which the channel can be `Collected`
func (s *state6) SettlingAt() (abi.ChainEpoch, error) {
	return s.State.SettlingAt, nil
}

// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (s *state6) ToSend() (abi.TokenAmount, error) {
	return s.State.ToSend, nil
}

func (s *state6) getOrLoadLsAmt() (*adt6.Array, error) {
	if s.lsAmt != nil {
		return s.lsAmt, nil
	}

	// Get the lane state from the chain
	lsamt, err := adt6.AsArray(s.store, s.State.LaneStates, paych6.LaneStatesAmtBitwidth)
	if err != nil {
		return nil, err
	}

	s.lsAmt = lsamt
	return lsamt, nil
}

// Get total number of lanes
func (s *state6) LaneCount() (uint64, error) {
	lsamt, err := s.getOrLoadLsAmt()
	if err != nil {
		return 0, err
	}
	return lsamt.Length(), nil
}

func (s *state6) GetState() interface{} {
	return &s.State
}

// Iterate lane states
func (s *state6) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
	// Get the lane state from the chain
	lsamt, err := s.getOrLoadLsAmt()
	if err != nil {
		return err
	}

	// Note: we use a map instead of an array to store laneStates because the
	// client sets the lane ID (the index) and potentially they could use a
	// very large index.
	var ls paych6.LaneState
	return lsamt.ForEach(&ls, func(i int64) error {
		return cb(uint64(i), &laneState6{ls})
	})
}

type laneState6 struct {
	paych6.LaneState
}

func (ls *laneState6) Redeemed() (big.Int, error) {
	return ls.LaneState.Redeemed, nil
}

func (ls *laneState6) Nonce() (uint64, error) {
	return ls.LaneState.Nonce, nil
}
@ -24,6 +24,8 @@ import (
	builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"

	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
)

func init() {
@ -47,11 +49,15 @@ func init() {
	builtin.RegisterActorState(builtin5.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load5(store, root)
	})

	builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load6(store, root)
	})
}

var (
	Address = builtin5.StoragePowerActorAddr
	Methods = builtin5.MethodsPower
	Address = builtin6.StoragePowerActorAddr
	Methods = builtin6.MethodsPower
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@ -72,6 +78,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case builtin5.StoragePowerActorCodeID:
		return load5(store, act.Head)

	case builtin6.StoragePowerActorCodeID:
		return load6(store, act.Head)

	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -94,6 +103,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
	case actors.Version5:
		return make5(store)

	case actors.Version6:
		return make6(store)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -116,6 +128,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	case actors.Version5:
		return builtin5.StoragePowerActorCodeID, nil

	case actors.Version6:
		return builtin6.StoragePowerActorCodeID, nil

	}

	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
187	chain/actors/builtin/power/v6.go	Normal file
@ -0,0 +1,187 @@
package power

import (
	"bytes"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power"
	adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store) (State, error) {
	out := state6{store: store}

	s, err := power6.ConstructState(store)
	if err != nil {
		return nil, err
	}

	out.State = *s

	return &out, nil
}

type state6 struct {
	power6.State
	store adt.Store
}

func (s *state6) TotalLocked() (abi.TokenAmount, error) {
	return s.TotalPledgeCollateral, nil
}

func (s *state6) TotalPower() (Claim, error) {
	return Claim{
		RawBytePower:    s.TotalRawBytePower,
		QualityAdjPower: s.TotalQualityAdjPower,
	}, nil
}

// Committed power to the network. Includes miners below the minimum threshold.
func (s *state6) TotalCommitted() (Claim, error) {
	return Claim{
		RawBytePower:    s.TotalBytesCommitted,
		QualityAdjPower: s.TotalQABytesCommitted,
	}, nil
}

func (s *state6) MinerPower(addr address.Address) (Claim, bool, error) {
	claims, err := s.claims()
	if err != nil {
		return Claim{}, false, err
	}
	var claim power6.Claim
	ok, err := claims.Get(abi.AddrKey(addr), &claim)
	if err != nil {
		return Claim{}, false, err
	}
	return Claim{
		RawBytePower:    claim.RawBytePower,
		QualityAdjPower: claim.QualityAdjPower,
	}, ok, nil
}

func (s *state6) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
	return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
}

func (s *state6) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
	return builtin.FromV6FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
}

func (s *state6) MinerCounts() (uint64, uint64, error) {
	return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
}

func (s *state6) ListAllMiners() ([]address.Address, error) {
	claims, err := s.claims()
	if err != nil {
		return nil, err
	}

	var miners []address.Address
	err = claims.ForEach(nil, func(k string) error {
		a, err := address.NewFromBytes([]byte(k))
		if err != nil {
			return err
		}
		miners = append(miners, a)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return miners, nil
}

func (s *state6) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
	claims, err := s.claims()
	if err != nil {
		return err
	}

	var claim power6.Claim
	return claims.ForEach(&claim, func(k string) error {
		a, err := address.NewFromBytes([]byte(k))
		if err != nil {
			return err
		}
		return cb(a, Claim{
			RawBytePower:    claim.RawBytePower,
			QualityAdjPower: claim.QualityAdjPower,
		})
	})
}

func (s *state6) ClaimsChanged(other State) (bool, error) {
	other6, ok := other.(*state6)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.Claims.Equals(other6.State.Claims), nil
}

func (s *state6) SetTotalQualityAdjPower(p abi.StoragePower) error {
	s.State.TotalQualityAdjPower = p
	return nil
}

func (s *state6) SetTotalRawBytePower(p abi.StoragePower) error {
	s.State.TotalRawBytePower = p
	return nil
}

func (s *state6) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
	s.State.ThisEpochQualityAdjPower = p
	return nil
}

func (s *state6) SetThisEpochRawBytePower(p abi.StoragePower) error {
	s.State.ThisEpochRawBytePower = p
	return nil
}

func (s *state6) GetState() interface{} {
	return &s.State
}

func (s *state6) claims() (adt.Map, error) {
	return adt6.AsMap(s.store, s.Claims, builtin6.DefaultHamtBitwidth)
}

func (s *state6) decodeClaim(val *cbg.Deferred) (Claim, error) {
	var ci power6.Claim
	if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return Claim{}, err
	}
	return fromV6Claim(ci), nil
}

func fromV6Claim(v6 power6.Claim) Claim {
	return Claim{
		RawBytePower:    v6.RawBytePower,
		QualityAdjPower: v6.QualityAdjPower,
	}
}
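A hedged usage sketch (not in the diff): reading a miner's claim through the abstract power State once the v6 actor is loaded. The helper name is hypothetical; store, act and the miner address are assumed to come from the caller, with this repo's adt, types and power packages imported.

// minerClaim resolves a miner's raw-byte and quality-adjusted power from the
// storage power actor state; addr is the miner's ID address.
func minerClaim(store adt.Store, act *types.Actor, addr address.Address) (power.Claim, error) {
	st, err := power.Load(store, act)
	if err != nil {
		return power.Claim{}, err
	}
	claim, found, err := st.MinerPower(addr)
	if err != nil {
		return power.Claim{}, err
	}
	if !found {
		// miner has no claim recorded
		return power.Claim{}, nil
	}
	return claim, nil
}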
@ -19,6 +19,8 @@ import (

	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/types"
@ -45,11 +47,15 @@ func init() {
	builtin.RegisterActorState(builtin5.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load5(store, root)
	})

	builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load6(store, root)
	})
}

var (
	Address = builtin5.RewardActorAddr
	Methods = builtin5.MethodsReward
	Address = builtin6.RewardActorAddr
	Methods = builtin6.MethodsReward
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@ -70,6 +76,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case builtin5.RewardActorCodeID:
		return load5(store, act.Head)

	case builtin6.RewardActorCodeID:
		return load6(store, act.Head)

	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -92,6 +101,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage
	case actors.Version5:
		return make5(store, currRealizedPower)

	case actors.Version6:
		return make6(store, currRealizedPower)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -114,6 +126,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	case actors.Version5:
		return builtin5.RewardActorCodeID, nil

	case actors.Version6:
		return builtin6.RewardActorCodeID, nil

	}

	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
98	chain/actors/builtin/reward/v6.go	Normal file
@ -0,0 +1,98 @@
package reward

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"

	miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
	reward6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/reward"
	smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
	out := state6{store: store}
	out.State = *reward6.ConstructState(currRealizedPower)
	return &out, nil
}

type state6 struct {
	reward6.State
	store adt.Store
}

func (s *state6) ThisEpochReward() (abi.TokenAmount, error) {
	return s.State.ThisEpochReward, nil
}

func (s *state6) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
	return builtin.FilterEstimate{
		PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
		VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
	}, nil
}

func (s *state6) ThisEpochBaselinePower() (abi.StoragePower, error) {
	return s.State.ThisEpochBaselinePower, nil
}

func (s *state6) TotalStoragePowerReward() (abi.TokenAmount, error) {
	return s.State.TotalStoragePowerReward, nil
}

func (s *state6) EffectiveBaselinePower() (abi.StoragePower, error) {
	return s.State.EffectiveBaselinePower, nil
}

func (s *state6) EffectiveNetworkTime() (abi.ChainEpoch, error) {
	return s.State.EffectiveNetworkTime, nil
}

func (s *state6) CumsumBaseline() (reward6.Spacetime, error) {
	return s.State.CumsumBaseline, nil
}

func (s *state6) CumsumRealized() (reward6.Spacetime, error) {
	return s.State.CumsumRealized, nil
}

func (s *state6) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
	return miner6.InitialPledgeForPower(
		qaPower,
		s.State.ThisEpochBaselinePower,
		s.State.ThisEpochRewardSmoothed,
		smoothing6.FilterEstimate{
			PositionEstimate: networkQAPower.PositionEstimate,
			VelocityEstimate: networkQAPower.VelocityEstimate,
		},
		circSupply,
	), nil
}

func (s *state6) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
	return miner6.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
		smoothing6.FilterEstimate{
			PositionEstimate: networkQAPower.PositionEstimate,
			VelocityEstimate: networkQAPower.VelocityEstimate,
		},
		sectorWeight), nil
}

func (s *state6) GetState() interface{} {
	return &s.State
}
@ -15,10 +15,12 @@ import (
	builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"

	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
)

var (
	Address = builtin5.SystemActorAddr
	Address = builtin6.SystemActorAddr
)

func MakeState(store adt.Store, av actors.Version) (State, error) {
@ -39,6 +41,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
	case actors.Version5:
		return make5(store)

	case actors.Version6:
		return make6(store)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -61,6 +66,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	case actors.Version5:
		return builtin5.SystemActorCodeID, nil

	case actors.Version6:
		return builtin6.SystemActorCodeID, nil

	}

	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
35	chain/actors/builtin/system/v6.go	Normal file
@ -0,0 +1,35 @@
package system

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	system6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/system"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store) (State, error) {
	out := state6{store: store}
	out.State = system6.State{}
	return &out, nil
}

type state6 struct {
	system6.State
	store adt.Store
}

func (s *state6) GetState() interface{} {
	return &s.State
}
75	chain/actors/builtin/verifreg/v6.go	Normal file
@ -0,0 +1,75 @@
package verifreg

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
	verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg"
	adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
)

var _ State = (*state6)(nil)

func load6(store adt.Store, root cid.Cid) (State, error) {
	out := state6{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make6(store adt.Store, rootKeyAddress address.Address) (State, error) {
	out := state6{store: store}

	s, err := verifreg6.ConstructState(store, rootKeyAddress)
	if err != nil {
		return nil, err
	}

	out.State = *s

	return &out, nil
}

type state6 struct {
	verifreg6.State
	store adt.Store
}

func (s *state6) RootKey() (address.Address, error) {
	return s.State.RootKey, nil
}

func (s *state6) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
	return getDataCap(s.store, actors.Version6, s.verifiedClients, addr)
}

func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
	return getDataCap(s.store, actors.Version6, s.verifiers, addr)
}

func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version6, s.verifiers, cb)
}

func (s *state6) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version6, s.verifiedClients, cb)
}

func (s *state6) verifiedClients() (adt.Map, error) {
	return adt6.AsMap(s.store, s.VerifiedClients, builtin6.DefaultHamtBitwidth)
}

func (s *state6) verifiers() (adt.Map, error) {
	return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth)
}

func (s *state6) GetState() interface{} {
	return &s.State
}
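Not in the diff: a hedged sketch of querying a verified client's remaining datacap through the interface implemented above; the helper name and client address are placeholders, and this repo's adt, types and verifreg packages are assumed imported.

// clientDataCap reports whether addr is a verified client and, if so, how much
// datacap it currently holds, independent of the underlying actors version.
func clientDataCap(store adt.Store, act *types.Actor, addr address.Address) (abi.StoragePower, error) {
	st, err := verifreg.Load(store, act)
	if err != nil {
		return abi.NewStoragePower(0), err
	}
	found, dcap, err := st.VerifiedClientDataCap(addr)
	if err != nil || !found {
		return abi.NewStoragePower(0), err
	}
	return dcap, nil
}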
@ -19,6 +19,8 @@ import (

	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -47,11 +49,15 @@ func init() {
		return load5(store, root)
	})

	builtin.RegisterActorState(builtin6.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
		return load6(store, root)
	})

}

var (
	Address = builtin5.VerifiedRegistryActorAddr
	Methods = builtin5.MethodsVerifiedRegistry
	Address = builtin6.VerifiedRegistryActorAddr
	Methods = builtin6.MethodsVerifiedRegistry
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@ -72,6 +78,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case builtin5.VerifiedRegistryActorCodeID:
		return load5(store, act.Head)

	case builtin6.VerifiedRegistryActorCodeID:
		return load6(store, act.Head)

	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -94,6 +103,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres
	case actors.Version5:
		return make5(store, rootKeyAddress)

	case actors.Version6:
		return make6(store, rootKeyAddress)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -116,6 +128,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	case actors.Version5:
		return builtin5.VerifiedRegistryActorCodeID, nil

	case actors.Version6:
		return builtin6.VerifiedRegistryActorCodeID, nil

	}

	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -35,14 +35,19 @@ import (
|
||||
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
|
||||
verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
|
||||
|
||||
paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market"
|
||||
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
|
||||
verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg"
|
||||
|
||||
paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych"
|
||||
)
|
||||
|
||||
const (
|
||||
ChainFinality = miner5.ChainFinality
|
||||
ChainFinality = miner6.ChainFinality
|
||||
SealRandomnessLookback = ChainFinality
|
||||
PaychSettleDelay = paych5.SettleDelay
|
||||
MaxPreCommitRandomnessLookback = builtin5.EpochsInDay + SealRandomnessLookback
|
||||
PaychSettleDelay = paych6.SettleDelay
|
||||
MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback
|
||||
)
|
||||
|
||||
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
||||
@ -65,6 +70,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
|
||||
miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
AddSupportedProofTypes(types...)
|
||||
}
|
||||
|
||||
@ -103,6 +110,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
|
||||
miner5.WindowPoStProofTypes[wpp] = struct{}{}
|
||||
|
||||
miner6.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
wpp, err = t.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
// Fine to panic, this is a test-only method
|
||||
panic(err)
|
||||
}
|
||||
|
||||
miner6.WindowPoStProofTypes[wpp] = struct{}{}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -121,11 +137,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
|
||||
|
||||
miner5.PreCommitChallengeDelay = delay
|
||||
|
||||
miner6.PreCommitChallengeDelay = delay
|
||||
|
||||
}
|
||||
|
||||
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
|
||||
func GetPreCommitChallengeDelay() abi.ChainEpoch {
|
||||
return miner5.PreCommitChallengeDelay
|
||||
return miner6.PreCommitChallengeDelay
|
||||
}
|
||||
|
||||
// SetConsensusMinerMinPower sets the minimum power of an individual miner must
|
||||
@ -151,6 +169,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
|
||||
for _, policy := range builtin6.PoStProofPolicies {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
|
||||
@ -167,6 +189,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
|
||||
|
||||
verifreg5.MinVerifiedDealSize = size
|
||||
|
||||
verifreg6.MinVerifiedDealSize = size
|
||||
|
||||
}
|
||||
|
||||
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
|
||||
@ -192,6 +216,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a
|
||||
|
||||
return miner5.MaxProveCommitDuration[t], nil
|
||||
|
||||
case actors.Version6:
|
||||
|
||||
return miner6.MaxProveCommitDuration[t], nil
|
||||
|
||||
default:
|
||||
return 0, xerrors.Errorf("unsupported actors version")
|
||||
}
|
||||
@ -222,6 +250,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) {
|
||||
Denominator: denom,
|
||||
}
|
||||
|
||||
market6.ProviderCollateralSupplyTarget = builtin6.BigFrac{
|
||||
Numerator: num,
|
||||
Denominator: denom,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func DealProviderCollateralBounds(
|
||||
@ -260,13 +293,18 @@ func DealProviderCollateralBounds(
|
||||
min, max := market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
return min, max, nil
|
||||
|
||||
case actors.Version6:
|
||||
|
||||
min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
return min, max, nil
|
||||
|
||||
default:
|
||||
return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
|
||||
}
|
||||
}
|
||||
|
||||
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
|
||||
return market5.DealDurationBounds(pieceSize)
|
||||
return market6.DealDurationBounds(pieceSize)
|
||||
}
|
||||
|
||||
// Sets the challenge window and scales the proving period to match (such that
|
||||
@ -300,6 +338,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
|
||||
// scale it if we're scaling the challenge period.
|
||||
miner5.WPoStDisputeWindow = period * 30
|
||||
|
||||
miner6.WPoStChallengeWindow = period
|
||||
miner6.WPoStProvingPeriod = period * abi.ChainEpoch(miner6.WPoStPeriodDeadlines)
|
||||
|
||||
// by default, this is 2x finality which is 30 periods.
|
||||
// scale it if we're scaling the challenge period.
|
||||
miner6.WPoStDisputeWindow = period * 30
|
||||
|
||||
}
|
||||
|
||||
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
|
||||
@ -312,15 +357,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
|
||||
}
|
||||
|
||||
func GetMaxSectorExpirationExtension() abi.ChainEpoch {
|
||||
return miner5.MaxSectorExpirationExtension
|
||||
return miner6.MaxSectorExpirationExtension
|
||||
}
|
||||
|
||||
func GetMinSectorExpiration() abi.ChainEpoch {
|
||||
return miner5.MinSectorExpiration
|
||||
return miner6.MinSectorExpiration
|
||||
}
|
||||
|
||||
func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
|
||||
sectorsPerPart, err := builtin5.PoStProofWindowPoStPartitionSectors(p)
|
||||
sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -333,8 +378,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e
|
||||
|
||||
func GetDefaultSectorSize() abi.SectorSize {
|
||||
// supported sector sizes are the same across versions.
|
||||
szs := make([]abi.SectorSize, 0, len(miner5.PreCommitSealProofTypesV8))
|
||||
for spt := range miner5.PreCommitSealProofTypesV8 {
|
||||
szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8))
|
||||
for spt := range miner6.PreCommitSealProofTypesV8 {
|
||||
ss, err := spt.SectorSize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -359,7 +404,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version)
|
||||
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
|
||||
}
|
||||
|
||||
return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime
|
||||
return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime
|
||||
}
|
||||
|
||||
func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
|
||||
@ -384,6 +429,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
|
||||
case actors.Version5:
|
||||
return miner5.AddressedSectorsMax, nil
|
||||
|
||||
case actors.Version6:
|
||||
return miner6.AddressedSectorsMax, nil
|
||||
|
||||
default:
|
||||
return 0, xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
@ -417,12 +465,16 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
|
||||
|
||||
return miner5.DeclarationsMax, nil
|
||||
|
||||
case actors.Version6:
|
||||
|
||||
return miner6.DeclarationsMax, nil
|
||||
|
||||
default:
|
||||
return 0, xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
}
|
||||
|
||||
func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
v, err := actors.VersionForNetwork(nwVer)
|
||||
if err != nil {
|
||||
return big.Zero(), err
|
||||
@ -449,6 +501,46 @@ func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.T
|
||||
|
||||
return miner5.AggregateNetworkFee(aggregateSize, baseFee), nil
|
||||
|
||||
case actors.Version6:
|
||||
|
||||
return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
|
||||
|
||||
default:
|
||||
return big.Zero(), xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
}
|
||||
|
||||
func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
v, err := actors.VersionForNetwork(nwVer)
|
||||
if err != nil {
|
||||
return big.Zero(), err
|
||||
}
|
||||
switch v {
|
||||
|
||||
case actors.Version0:
|
||||
|
||||
return big.Zero(), nil
|
||||
|
||||
case actors.Version2:
|
||||
|
||||
return big.Zero(), nil
|
||||
|
||||
case actors.Version3:
|
||||
|
||||
return big.Zero(), nil
|
||||
|
||||
case actors.Version4:
|
||||
|
||||
return big.Zero(), nil
|
||||
|
||||
case actors.Version5:
|
||||
|
||||
return big.Zero(), nil
|
||||
|
||||
case actors.Version6:
|
||||
|
||||
return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
|
||||
|
||||
default:
|
||||
return big.Zero(), xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
|
@ -63,7 +63,7 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{}
|
||||
miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
{{else}}
|
||||
{{else if (eq . 5)}}
|
||||
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
wpp, err := t.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
@ -71,6 +71,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{}
|
||||
{{else}}
|
||||
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
wpp, err = t.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
// Fine to panic, this is a test-only method
|
||||
panic(err)
|
||||
}
|
||||
|
||||
miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@ -285,7 +294,7 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
v, err := actors.VersionForNetwork(nwVer)
|
||||
if err != nil {
|
||||
return big.Zero(), err
|
||||
@ -293,10 +302,31 @@ func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.T
|
||||
switch v {
|
||||
{{range .versions}}
|
||||
case actors.Version{{.}}:
|
||||
{{if (le . 4)}}
|
||||
return big.Zero(), nil
|
||||
{{else}}
|
||||
{{if (ge . 6)}}
|
||||
return miner{{.}}.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
|
||||
{{else if (eq . 5)}}
|
||||
return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee), nil
|
||||
{{else}}
|
||||
return big.Zero(), nil
|
||||
{{end}}
|
||||
{{end}}
|
||||
default:
|
||||
return big.Zero(), xerrors.Errorf("unsupported network version")
|
||||
}
|
||||
}
|
||||
|
||||
func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
v, err := actors.VersionForNetwork(nwVer)
|
||||
if err != nil {
|
||||
return big.Zero(), err
|
||||
}
|
||||
switch v {
|
||||
{{range .versions}}
|
||||
case actors.Version{{.}}:
|
||||
{{if (ge . 6)}}
|
||||
return miner{{.}}.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
|
||||
{{else}}
|
||||
return big.Zero(), nil
|
||||
{{end}}
|
||||
{{end}}
|
||||
default:
|
||||
|
@ -8,9 +8,21 @@ import (

type Version int

var LatestVersion = 5
/* inline-gen template

var Versions = []int{0, 2, 3, 4, LatestVersion}
var LatestVersion = {{.latestActorsVersion}}

var Versions = []int{ {{range .actorVersions}} {{.}}, {{end}} }

const ({{range .actorVersions}}
	Version{{.}} Version = {{.}}{{end}}
)

/* inline-gen start */

var LatestVersion = 6

var Versions = []int{0, 2, 3, 4, 5, 6}

const (
	Version0 Version = 0
@ -18,8 +30,11 @@ const (
	Version3 Version = 3
	Version4 Version = 4
	Version5 Version = 5
	Version6 Version = 6
)

/* inline-gen end */

// Converts a network version into an actors adt version.
func VersionForNetwork(version network.Version) (Version, error) {
	switch version {
@ -33,6 +48,8 @@ func VersionForNetwork(version network.Version) (Version, error) {
		return Version4, nil
	case network.Version13:
		return Version5, nil
	case network.Version14:
		return Version6, nil
	default:
		return -1, fmt.Errorf("unsupported network version %d", version)
	}
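A small, hedged sketch (not in the diff) of how the new mapping above is consumed: callers asking for the actors version behind a network version should now get Version6 for network.Version14. The helper name is hypothetical; the actors and network packages plus fmt are assumed imported.

// latestSupported sanity-checks that the highest network version wired in
// above maps to the newly added actors.Version6.
func latestSupported() error {
	av, err := actors.VersionForNetwork(network.Version14)
	if err != nil {
		return err
	}
	if av != actors.Version6 {
		return fmt.Errorf("expected actors v6 for network v14, got %d", av)
	}
	return nil
}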
@ -177,6 +177,11 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
		// TODO handle genesis better
		return nil
	}

	if curr.Round != prev.Round+1 {
		return xerrors.Errorf("invalid beacon entry: cur (%d) != prev (%d) + 1", curr.Round, prev.Round)
	}

	if be := db.getCachedValue(curr.Round); be != nil {
		if !bytes.Equal(curr.Data, be.Data) {
			return xerrors.New("invalid beacon value, does not match cached good value")
@ -54,7 +54,8 @@ func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry)
}

func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch) uint64 {
	return uint64(epoch)
	// offset for better testing
	return uint64(epoch + 100)
}

var _ RandomBeacon = (*mockBeacon)(nil)
316	chain/consensus/filcns/compute_state.go	Normal file
@ -0,0 +1,316 @@
|
||||
package filcns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/rand"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
/* inline-gen template
|
||||
{{range .actorVersions}}
|
||||
exported{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/exported"{{end}}
|
||||
|
||||
/* inline-gen start */
|
||||
|
||||
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
|
||||
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
|
||||
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
|
||||
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
|
||||
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
|
||||
exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported"
|
||||
|
||||
/* inline-gen end */
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
)
|
||||
|
||||
func NewActorRegistry() *vm.ActorRegistry {
|
||||
inv := vm.NewActorRegistry()
|
||||
|
||||
// TODO: define all these properties on the actors themselves, in specs-actors.
|
||||
/* inline-gen template
|
||||
{{range .actorVersions}}
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version{{.}}), exported{{.}}.BuiltinActors()...){{end}}
|
||||
|
||||
/* inline-gen start */
|
||||
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
|
||||
inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...)
|
||||
|
||||
/* inline-gen end */
|
||||
|
||||
return inv
|
||||
}
|
||||
|
||||
type TipSetExecutor struct{}
|
||||
|
||||
func NewTipSetExecutor() *TipSetExecutor {
|
||||
return &TipSetExecutor{}
|
||||
}
|
||||
|
||||
func (t *TipSetExecutor) NewActorRegistry() *vm.ActorRegistry {
|
||||
return NewActorRegistry()
|
||||
}
|
||||
|
||||
type FilecoinBlockMessages struct {
|
||||
store.BlockMessages
|
||||
|
||||
WinCount int64
|
||||
}
|
||||
|
||||
func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []FilecoinBlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
|
||||
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
|
||||
defer done()
|
||||
|
||||
partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
|
||||
defer func() {
|
||||
partDone()
|
||||
}()
|
||||
|
||||
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
|
||||
vmopt := &vm.VMOpts{
|
||||
StateBase: base,
|
||||
Epoch: epoch,
|
||||
Rand: r,
|
||||
Bstore: sm.ChainStore().StateBlockstore(),
|
||||
Actors: NewActorRegistry(),
|
||||
Syscalls: sm.Syscalls,
|
||||
CircSupplyCalc: sm.GetVMCirculatingSupply,
|
||||
NtwkVersion: sm.GetNtwkVersion,
|
||||
BaseFee: baseFee,
|
||||
LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts),
|
||||
}
|
||||
|
||||
return sm.VMConstructor()(ctx, vmopt)
|
||||
}
|
||||
|
||||
vmi, err := makeVmWithBaseState(pstate)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
|
||||
}
|
||||
|
||||
runCron := func(epoch abi.ChainEpoch) error {
|
||||
cronMsg := &types.Message{
|
||||
To: cron.Address,
|
||||
From: builtin.SystemActorAddr,
|
||||
Nonce: uint64(epoch),
|
||||
Value: types.NewInt(0),
|
||||
GasFeeCap: types.NewInt(0),
|
||||
GasPremium: types.NewInt(0),
|
||||
GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little
|
||||
Method: cron.Methods.EpochTick,
|
||||
Params: nil,
|
||||
}
|
||||
ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if em != nil {
|
||||
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
|
||||
return xerrors.Errorf("callback failed on cron message: %w", err)
|
||||
}
|
||||
}
|
||||
if ret.ExitCode != 0 {
|
||||
return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := parentEpoch; i < epoch; i++ {
|
||||
if i > parentEpoch {
|
||||
// run cron for null rounds if any
|
||||
if err := runCron(i); err != nil {
|
||||
return cid.Undef, cid.Undef, err
|
||||
}
|
||||
|
||||
pstate, err = vmi.Flush(ctx)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// handle state forks
|
||||
// XXX: The state tree
|
||||
newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
|
||||
}
|
||||
|
||||
if pstate != newState {
|
||||
vmi, err = makeVmWithBaseState(newState)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
vmi.SetBlockHeight(i + 1)
|
||||
pstate = newState
|
||||
}
|
||||
|
||||
partDone()
|
||||
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
|
||||
|
||||
var receipts []cbg.CBORMarshaler
|
||||
processedMsgs := make(map[cid.Cid]struct{})
|
||||
for _, b := range bms {
|
||||
penalty := types.NewInt(0)
|
||||
gasReward := big.Zero()
|
||||
|
||||
for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
|
||||
m := cm.VMMessage()
|
||||
if _, found := processedMsgs[m.Cid()]; found {
|
||||
continue
|
||||
}
|
||||
r, err := vmi.ApplyMessage(ctx, cm)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, err
|
||||
}
|
||||
|
||||
receipts = append(receipts, &r.MessageReceipt)
|
||||
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
|
||||
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
|
||||
|
||||
if em != nil {
|
||||
if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
|
||||
return cid.Undef, cid.Undef, err
|
||||
}
|
||||
}
|
||||
processedMsgs[m.Cid()] = struct{}{}
|
||||
}
|
||||
|
||||
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
|
||||
Miner: b.Miner,
|
||||
Penalty: penalty,
|
||||
GasReward: gasReward,
|
||||
WinCount: b.WinCount,
|
||||
})
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
|
||||
}
|
||||
|
||||
rwMsg := &types.Message{
|
||||
From: builtin.SystemActorAddr,
|
||||
To: reward.Address,
|
||||
Nonce: uint64(epoch),
|
||||
Value: types.NewInt(0),
|
||||
GasFeeCap: types.NewInt(0),
|
||||
GasPremium: types.NewInt(0),
|
||||
GasLimit: 1 << 30,
|
||||
Method: reward.Methods.AwardBlockReward,
|
||||
Params: params,
|
||||
}
|
||||
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
|
||||
if actErr != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
|
||||
}
|
||||
if em != nil {
|
||||
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if ret.ExitCode != 0 {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
|
||||
}
|
||||
}
|
||||
|
||||
partDone()
|
||||
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
|
||||
|
||||
if err := runCron(epoch); err != nil {
|
||||
return cid.Cid{}, cid.Cid{}, err
|
||||
}
|
||||
|
||||
partDone()
|
||||
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
|
||||
|
||||
rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx))
|
||||
for i, receipt := range receipts {
|
||||
if err := rectarr.Set(uint64(i), receipt); err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
||||
}
|
||||
}
|
||||
rectroot, err := rectarr.Root()
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
||||
}
|
||||
|
||||
st, err := vmi.Flush(ctx)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
|
||||
}
|
||||
|
||||
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
|
||||
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
|
||||
|
||||
return st, rectroot, nil
|
||||
}
|
||||
|
||||
func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
|
||||
defer span.End()
|
||||
|
||||
blks := ts.Blocks()
|
||||
|
||||
for i := 0; i < len(blks); i++ {
|
||||
for j := i + 1; j < len(blks); j++ {
|
||||
if blks[i].Miner == blks[j].Miner {
|
||||
return cid.Undef, cid.Undef,
|
||||
xerrors.Errorf("duplicate miner in a tipset (%s %s)",
|
||||
blks[i].Miner, blks[j].Miner)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var parentEpoch abi.ChainEpoch
|
||||
pstate := blks[0].ParentStateRoot
|
||||
if blks[0].Height > 0 {
|
||||
parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0])
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
|
||||
}
|
||||
|
||||
parentEpoch = parent.Height
|
||||
}
|
||||
|
||||
r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon())
|
||||
|
||||
blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts)
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
|
||||
}
|
||||
fbmsgs := make([]FilecoinBlockMessages, len(blkmsgs))
|
||||
for i := range fbmsgs {
|
||||
fbmsgs[i].BlockMessages = blkmsgs[i]
|
||||
fbmsgs[i].WinCount = ts.Blocks()[i].ElectionProof.WinCount
|
||||
}
|
||||
baseFee := blks[0].ParentBaseFee
|
||||
|
||||
return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, fbmsgs, blks[0].Height, r, em, baseFee, ts)
|
||||
}
|
||||
|
||||
var _ stmgr.Executor = &TipSetExecutor{}
|
855
chain/consensus/filcns/filecoin.go
Normal file
@ -0,0 +1,855 @@
|
||||
package filcns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/rand"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
|
||||
|
||||
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
"github.com/filecoin-project/lotus/chain/beacon"
|
||||
"github.com/filecoin-project/lotus/chain/consensus"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/lib/async"
|
||||
"github.com/filecoin-project/lotus/lib/sigs"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
)
|
||||
|
||||
var log = logging.Logger("fil-consensus")
|
||||
|
||||
type FilecoinEC struct {
|
||||
// The interface for accessing and putting tipsets into local storage
|
||||
store *store.ChainStore
|
||||
|
||||
// handle to the random beacon for verification
|
||||
beacon beacon.Schedule
|
||||
|
||||
// the state manager handles making state queries
|
||||
sm *stmgr.StateManager
|
||||
|
||||
verifier ffiwrapper.Verifier
|
||||
|
||||
genesis *types.TipSet
|
||||
}
|
||||
|
||||
// Blocks that are more than MaxHeightDrift epochs above
|
||||
// the theoretical max height based on systime are quickly rejected
|
||||
const MaxHeightDrift = 5
|
||||
|
||||
func NewFilecoinExpectedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier ffiwrapper.Verifier, genesis chain.Genesis) consensus.Consensus {
|
||||
if build.InsecurePoStValidation {
|
||||
log.Warn("*********************************************************************************************")
|
||||
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
|
||||
log.Warn("*********************************************************************************************")
|
||||
}
|
||||
|
||||
return &FilecoinEC{
|
||||
store: sm.ChainStore(),
|
||||
beacon: beacon,
|
||||
sm: sm,
|
||||
verifier: verifier,
|
||||
genesis: genesis,
|
||||
}
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
|
||||
if err := blockSanityChecks(b.Header); err != nil {
|
||||
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
|
||||
}
|
||||
|
||||
h := b.Header
|
||||
|
||||
baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
|
||||
}
|
||||
|
||||
winPoStNv := filec.sm.GetNtwkVersion(ctx, baseTs.Height())
|
||||
|
||||
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, h.Height)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
|
||||
}
|
||||
|
||||
prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
|
||||
}
|
||||
|
||||
// fast checks first
|
||||
if h.Height <= baseTs.Height() {
|
||||
return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
|
||||
}
|
||||
|
||||
nulls := h.Height - (baseTs.Height() + 1)
|
||||
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
|
||||
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
|
||||
}
|
||||
|
||||
now := uint64(build.Clock.Now().Unix())
|
||||
if h.Timestamp > now+build.AllowableClockDriftSecs {
|
||||
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, consensus.ErrTemporal)
|
||||
}
|
||||
if h.Timestamp > now {
|
||||
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
|
||||
}
|
||||
|
||||
msgsCheck := async.Err(func() error {
|
||||
if b.Cid() == build.WhitelistedBlock {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := filec.checkBlockMessages(ctx, b, baseTs); err != nil {
|
||||
return xerrors.Errorf("block had invalid messages: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
minerCheck := async.Err(func() error {
|
||||
if err := filec.minerIsValid(ctx, h.Miner, baseTs); err != nil {
|
||||
return xerrors.Errorf("minerIsValid failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
baseFeeCheck := async.Err(func() error {
|
||||
baseFee, err := filec.store.ComputeBaseFee(ctx, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("computing base fee: %w", err)
|
||||
}
|
||||
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
|
||||
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
|
||||
b.Header.ParentBaseFee, baseFee)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
pweight, err := filec.store.Weight(ctx, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting parent weight: %w", err)
|
||||
}
|
||||
|
||||
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
|
||||
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
|
||||
b.Header.ParentWeight, pweight)
|
||||
}
|
||||
|
||||
stateRootCheck := async.Err(func() error {
|
||||
stateroot, precp, err := filec.sm.TipSetState(ctx, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
|
||||
}
|
||||
|
||||
if stateroot != h.ParentStateRoot {
|
||||
msgs, err := filec.store.MessagesForTipset(baseTs)
|
||||
if err != nil {
|
||||
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
|
||||
} else {
|
||||
log.Warn("Messages for tipset with mismatching state:")
|
||||
for i, m := range msgs {
|
||||
mm := m.VMMessage()
|
||||
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
|
||||
}
|
||||
}
|
||||
|
||||
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
|
||||
}
|
||||
|
||||
if precp != h.ParentMessageReceipts {
|
||||
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// Stuff that needs worker address
|
||||
waddr, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, h.Miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
|
||||
}
|
||||
|
||||
winnerCheck := async.Err(func() error {
|
||||
if h.ElectionProof.WinCount < 1 {
|
||||
return xerrors.Errorf("block is not claiming to be a winner")
|
||||
}
|
||||
|
||||
eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, h.Miner, baseTs, lbts)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("determining if miner has min power failed: %w", err)
|
||||
}
|
||||
|
||||
if !eligible {
|
||||
return xerrors.New("block's miner is ineligible to mine")
|
||||
}
|
||||
|
||||
rBeacon := *prevBeacon
|
||||
if len(h.BeaconEntries) != 0 {
|
||||
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if err := h.Miner.MarshalCBOR(buf); err != nil {
|
||||
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
|
||||
}
|
||||
|
||||
vrfBase, err := rand.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not draw randomness: %w", err)
|
||||
}
|
||||
|
||||
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
|
||||
return xerrors.Errorf("validating block election proof failed: %w", err)
|
||||
}
|
||||
|
||||
slashed, err := stmgr.GetMinerSlashed(ctx, filec.sm, baseTs, h.Miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
|
||||
}
|
||||
|
||||
if slashed {
|
||||
return xerrors.Errorf("received block was from slashed or invalid miner")
|
||||
}
|
||||
|
||||
mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, filec.sm, lbst, h.Miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed getting power: %w", err)
|
||||
}
|
||||
|
||||
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
|
||||
if h.ElectionProof.WinCount != j {
|
||||
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
blockSigCheck := async.Err(func() error {
|
||||
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
|
||||
return xerrors.Errorf("check block signature failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
beaconValuesCheck := async.Err(func() error {
|
||||
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := beacon.ValidateBlockValues(filec.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
|
||||
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
tktsCheck := async.Err(func() error {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := h.Miner.MarshalCBOR(buf); err != nil {
|
||||
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
|
||||
}
|
||||
|
||||
if h.Height > build.UpgradeSmokeHeight {
|
||||
buf.Write(baseTs.MinTicket().VRFProof)
|
||||
}
|
||||
|
||||
beaconBase := *prevBeacon
|
||||
if len(h.BeaconEntries) != 0 {
|
||||
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
|
||||
}
|
||||
|
||||
vrfBase, err := rand.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
|
||||
}
|
||||
|
||||
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("validating block tickets failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
wproofCheck := async.Err(func() error {
|
||||
if err := filec.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
|
||||
return xerrors.Errorf("invalid election post: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
await := []async.ErrorFuture{
|
||||
minerCheck,
|
||||
tktsCheck,
|
||||
blockSigCheck,
|
||||
beaconValuesCheck,
|
||||
wproofCheck,
|
||||
winnerCheck,
|
||||
msgsCheck,
|
||||
baseFeeCheck,
|
||||
stateRootCheck,
|
||||
}
|
||||
|
||||
var merr error
|
||||
for _, fut := range await {
|
||||
if err := fut.AwaitContext(ctx); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
}
|
||||
}
|
||||
if merr != nil {
|
||||
mulErr := merr.(*multierror.Error)
|
||||
mulErr.ErrorFormat = func(es []error) string {
|
||||
if len(es) == 1 {
|
||||
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
|
||||
}
|
||||
|
||||
points := make([]string, len(es))
|
||||
for i, err := range es {
|
||||
points[i] = fmt.Sprintf("* %+v", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%d errors occurred:\n\t%s\n\n",
|
||||
len(es), strings.Join(points, "\n\t"))
|
||||
}
|
||||
return mulErr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func blockSanityChecks(h *types.BlockHeader) error {
|
||||
if h.ElectionProof == nil {
|
||||
return xerrors.Errorf("block cannot have nil election proof")
|
||||
}
|
||||
|
||||
if h.Ticket == nil {
|
||||
return xerrors.Errorf("block cannot have nil ticket")
|
||||
}
|
||||
|
||||
if h.BlockSig == nil {
|
||||
return xerrors.Errorf("block had nil signature")
|
||||
}
|
||||
|
||||
if h.BLSAggregate == nil {
|
||||
return xerrors.Errorf("block had nil bls aggregate signature")
|
||||
}
|
||||
|
||||
if h.Miner.Protocol() != address.ID {
|
||||
return xerrors.Errorf("block had non-ID miner address")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
|
||||
if build.InsecurePoStValidation {
|
||||
if len(h.WinPoStProof) == 0 {
|
||||
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
|
||||
}
|
||||
|
||||
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if err := h.Miner.MarshalCBOR(buf); err != nil {
|
||||
return xerrors.Errorf("failed to marshal miner address: %w", err)
|
||||
}
|
||||
|
||||
rbase := prevBeacon
|
||||
if len(h.BeaconEntries) > 0 {
|
||||
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
|
||||
}
|
||||
|
||||
rand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(h.Miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
|
||||
}
|
||||
|
||||
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting winning post sector set: %w", err)
|
||||
}
|
||||
|
||||
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
|
||||
Randomness: rand,
|
||||
Proofs: h.WinPoStProof,
|
||||
ChallengedSectors: sectors,
|
||||
Prover: abi.ActorID(mid),
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to verify election post: %w", err)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
|
||||
return xerrors.Errorf("winning post was invalid")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
|
||||
func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
|
||||
{
|
||||
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
|
||||
var pubks [][]byte
|
||||
|
||||
for _, m := range b.BlsMessages {
|
||||
sigCids = append(sigCids, m.Cid())
|
||||
|
||||
pubk, err := filec.sm.GetBlsPublicKey(ctx, m.From, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
|
||||
}
|
||||
|
||||
pubks = append(pubks, pubk)
|
||||
}
|
||||
|
||||
if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
|
||||
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
nonces := make(map[address.Address]uint64)
|
||||
|
||||
stateroot, _, err := filec.sm.TipSetState(ctx, baseTs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load base state tree: %w", err)
|
||||
}
|
||||
|
||||
nv := filec.sm.GetNtwkVersion(ctx, b.Header.Height)
|
||||
pl := vm.PricelistByEpoch(baseTs.Height())
|
||||
var sumGasLimit int64
|
||||
checkMsg := func(msg types.ChainMsg) error {
|
||||
m := msg.VMMessage()
|
||||
|
||||
// Phase 1: syntactic validation, as defined in the spec
|
||||
minGas := pl.OnChainMessage(msg.ChainLength())
|
||||
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
|
||||
// So below is overflow safe
|
||||
sumGasLimit += m.GasLimit
|
||||
if sumGasLimit > build.BlockGasLimit {
|
||||
return xerrors.Errorf("block gas limit exceeded")
|
||||
}
|
||||
|
||||
// Phase 2: (Partial) semantic validation:
|
||||
// the sender exists and is an account actor, and the nonces make sense
|
||||
var sender address.Address
|
||||
if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
|
||||
sender, err = st.LookupID(m.From)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
sender = m.From
|
||||
}
|
||||
|
||||
if _, ok := nonces[sender]; !ok {
|
||||
// `GetActor` does not validate that this is an account actor.
|
||||
act, err := st.GetActor(sender)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get actor: %w", err)
|
||||
}
|
||||
|
||||
if !builtin.IsAccountActor(act.Code) {
|
||||
return xerrors.New("Sender must be an account actor")
|
||||
}
|
||||
nonces[sender] = act.Nonce
|
||||
}
|
||||
|
||||
if nonces[sender] != m.Nonce {
|
||||
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
|
||||
}
|
||||
nonces[sender]++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate message arrays in a temporary blockstore.
|
||||
tmpbs := bstore.NewMemory()
|
||||
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
|
||||
|
||||
bmArr := blockadt.MakeEmptyArray(tmpstore)
|
||||
for i, m := range b.BlsMessages {
|
||||
if err := checkMsg(m); err != nil {
|
||||
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
|
||||
}
|
||||
|
||||
c, err := store.PutMessage(tmpbs, m)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
|
||||
}
|
||||
|
||||
k := cbg.CborCid(c)
|
||||
if err := bmArr.Set(uint64(i), &k); err != nil {
|
||||
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
smArr := blockadt.MakeEmptyArray(tmpstore)
|
||||
for i, m := range b.SecpkMessages {
|
||||
if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version14 {
|
||||
if m.Signature.Type != crypto.SigTypeSecp256k1 {
|
||||
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkMsg(m); err != nil {
|
||||
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
|
||||
}
|
||||
|
||||
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
|
||||
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
|
||||
kaddr, err := filec.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to resolve key addr: %w", err)
|
||||
}
|
||||
|
||||
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
|
||||
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
|
||||
}
|
||||
|
||||
c, err := store.PutMessage(tmpbs, m)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
|
||||
}
|
||||
k := cbg.CborCid(c)
|
||||
if err := smArr.Set(uint64(i), &k); err != nil {
|
||||
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
bmroot, err := bmArr.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
smroot, err := smArr.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
|
||||
BlsMessages: bmroot,
|
||||
SecpkMessages: smroot,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Header.Messages != mrcid {
|
||||
return fmt.Errorf("messages didnt match message root in header")
|
||||
}
|
||||
|
||||
// Finally, flush.
|
||||
return vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
|
||||
if filec.genesis == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
now := uint64(build.Clock.Now().Unix())
|
||||
return epoch > (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
|
||||
act, err := filec.sm.LoadActor(ctx, power.Address, baseTs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load power actor: %w", err)
|
||||
}
|
||||
|
||||
powState, err := power.Load(filec.store.ActorStore(ctx), act)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load power actor state: %w", err)
|
||||
}
|
||||
|
||||
_, exist, err := powState.MinerPower(maddr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to look up miner's claim: %w", err)
|
||||
}
|
||||
|
||||
if !exist {
|
||||
return xerrors.New("miner isn't valid")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
|
||||
return VerifyVRF(ctx, worker, rand, evrf)
|
||||
}
|
||||
|
||||
func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error {
|
||||
_, span := trace.StartSpan(ctx, "VerifyVRF")
|
||||
defer span.End()
|
||||
|
||||
sig := &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: vrfproof,
|
||||
}
|
||||
|
||||
if err := sigs.Verify(sig, worker, vrfBase); err != nil {
|
||||
return xerrors.Errorf("vrf was invalid: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrSoftFailure = errors.New("soft validation failure")
|
||||
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
|
||||
|
||||
func (filec *FilecoinEC) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
|
||||
if self {
|
||||
return filec.validateLocalBlock(ctx, msg)
|
||||
}
|
||||
|
||||
// track validation time
|
||||
begin := build.Clock.Now()
|
||||
defer func() {
|
||||
log.Debugf("block validation time: %s", build.Clock.Since(begin))
|
||||
}()
|
||||
|
||||
stats.Record(ctx, metrics.BlockReceived.M(1))
|
||||
|
||||
recordFailureFlagPeer := func(what string) {
|
||||
// bv.Validate will flag the peer in that case
|
||||
panic(what)
|
||||
}
|
||||
|
||||
blk, what, err := filec.decodeAndCheckBlock(msg)
|
||||
if err != nil {
|
||||
log.Error("got invalid block over pubsub: ", err)
|
||||
recordFailureFlagPeer(what)
|
||||
return pubsub.ValidationReject, what
|
||||
}
|
||||
|
||||
// validate the block meta: the Message CID in the header must match the included messages
|
||||
err = filec.validateMsgMeta(ctx, blk)
|
||||
if err != nil {
|
||||
log.Warnf("error validating message metadata: %s", err)
|
||||
recordFailureFlagPeer("invalid_block_meta")
|
||||
return pubsub.ValidationReject, "invalid_block_meta"
|
||||
}
|
||||
|
||||
reject, err := filec.validateBlockHeader(ctx, blk.Header)
|
||||
if err != nil {
|
||||
if reject == "" {
|
||||
log.Warn("ignoring block msg: ", err)
|
||||
return pubsub.ValidationIgnore, reject
|
||||
}
|
||||
recordFailureFlagPeer(reject)
|
||||
return pubsub.ValidationReject, reject
|
||||
}
|
||||
|
||||
// all good, accept the block
|
||||
msg.ValidatorData = blk
|
||||
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
|
||||
return pubsub.ValidationAccept, ""
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
|
||||
stats.Record(ctx, metrics.BlockPublished.M(1))
|
||||
|
||||
if size := msg.Size(); size > 1<<20-1<<15 {
|
||||
log.Errorf("ignoring oversize block (%dB)", size)
|
||||
return pubsub.ValidationIgnore, "oversize_block"
|
||||
}
|
||||
|
||||
blk, what, err := filec.decodeAndCheckBlock(msg)
|
||||
if err != nil {
|
||||
log.Errorf("got invalid local block: %s", err)
|
||||
return pubsub.ValidationIgnore, what
|
||||
}
|
||||
|
||||
msg.ValidatorData = blk
|
||||
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
|
||||
return pubsub.ValidationAccept, ""
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
|
||||
blk, err := types.DecodeBlockMsg(msg.GetData())
|
||||
if err != nil {
|
||||
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
|
||||
}
|
||||
|
||||
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
|
||||
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
|
||||
}
|
||||
|
||||
// make sure we have a signature
|
||||
if blk.Header.BlockSig == nil {
|
||||
return nil, "missing_signature", fmt.Errorf("block without a signature")
|
||||
}
|
||||
|
||||
return blk, "", nil
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
|
||||
// TODO there has to be a simpler way to do this without the blockstore dance
|
||||
// block headers use adt0
|
||||
store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))
|
||||
bmArr := blockadt.MakeEmptyArray(store)
|
||||
smArr := blockadt.MakeEmptyArray(store)
|
||||
|
||||
for i, m := range msg.BlsMessages {
|
||||
c := cbg.CborCid(m)
|
||||
if err := bmArr.Set(uint64(i), &c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for i, m := range msg.SecpkMessages {
|
||||
c := cbg.CborCid(m)
|
||||
if err := smArr.Set(uint64(i), &c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bmroot, err := bmArr.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
smroot, err := smArr.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
|
||||
BlsMessages: bmroot,
|
||||
SecpkMessages: smroot,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if msg.Header.Messages != mrcid {
|
||||
return fmt.Errorf("messages didn't match root cid in header")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) {
|
||||
|
||||
// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
|
||||
// to prevent spam attacks.
|
||||
// the logic works as follows: we look up the miner in the chain for its key.
|
||||
// if we can find it then it's a known miner and we can validate the signature.
|
||||
// if we can't find it, we check whether we are (near) synced in the chain.
|
||||
// if we are not synced we cannot validate the block and we must ignore it.
|
||||
// if we are synced and the miner is unknown, then the block is rejected.
|
||||
key, err := filec.checkPowerAndGetWorkerKey(ctx, b)
|
||||
if err != nil {
|
||||
if err != ErrSoftFailure && filec.isChainNearSynced() {
|
||||
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
|
||||
return "unknown_miner", err
|
||||
}
|
||||
|
||||
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain: %s", b.Cid())
|
||||
return "", err // ignore
|
||||
}
|
||||
|
||||
if b.ElectionProof.WinCount < 1 {
|
||||
log.Errorf("block is not claiming to be winning")
|
||||
return "not_winning", xerrors.Errorf("block not winning")
|
||||
}
|
||||
|
||||
err = sigs.CheckBlockSignature(ctx, b, key)
|
||||
if err != nil {
|
||||
log.Errorf("block signature verification failed: %s", err)
|
||||
return "signature_verification_failed", err
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
|
||||
// we check that the miner met the minimum power at the lookback tipset
|
||||
|
||||
baseTs := filec.store.GetHeaviestTipSet()
|
||||
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, bh.Height)
|
||||
if err != nil {
|
||||
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
|
||||
return address.Undef, ErrSoftFailure
|
||||
}
|
||||
|
||||
key, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bh.Miner)
|
||||
if err != nil {
|
||||
log.Warnf("failed to resolve worker key for miner %s and block height %d: %s", bh.Miner, bh.Height, err)
|
||||
return address.Undef, ErrSoftFailure
|
||||
}
|
||||
|
||||
// NOTE: we check to see if the miner was eligible in the lookback
|
||||
// tipset - 1 for historical reasons. DO NOT use the lookback state
|
||||
// returned by GetLookbackTipSetForRound.
|
||||
|
||||
eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, bh.Miner, baseTs, lbts)
|
||||
if err != nil {
|
||||
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
|
||||
return address.Undef, ErrSoftFailure
|
||||
}
|
||||
|
||||
if !eligible {
|
||||
log.Warnf("incoming block's miner is ineligible")
|
||||
return address.Undef, ErrInsufficientPower
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func (filec *FilecoinEC) isChainNearSynced() bool {
|
||||
ts := filec.store.GetHeaviestTipSet()
|
||||
timestamp := ts.MinTimestamp()
|
||||
timestampTime := time.Unix(int64(timestamp), 0)
|
||||
return build.Clock.Since(timestampTime) < 6*time.Hour
|
||||
}
|
||||
|
||||
var _ consensus.Consensus = &FilecoinEC{}
|
@ -1,38 +1,36 @@
|
||||
package gen
|
||||
package filcns
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/consensus"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
|
||||
|
||||
pts, err := sm.ChainStore().LoadTipSet(bt.Parents)
|
||||
func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
|
||||
pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
|
||||
}
|
||||
|
||||
st, recpts, err := sm.TipSetState(ctx, pts)
|
||||
st, recpts, err := filec.sm.TipSetState(ctx, pts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to load tipset state: %w", err)
|
||||
}
|
||||
|
||||
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch)
|
||||
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, pts, bt.Epoch)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
|
||||
}
|
||||
|
||||
worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, lbst, bt.Miner)
|
||||
worker, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bt.Miner)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get miner worker: %w", err)
|
||||
}
|
||||
@ -61,14 +59,14 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
|
||||
blsSigs = append(blsSigs, msg.Signature)
|
||||
blsMessages = append(blsMessages, &msg.Message)
|
||||
|
||||
c, err := sm.ChainStore().PutMessage(&msg.Message)
|
||||
c, err := filec.sm.ChainStore().PutMessage(&msg.Message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blsMsgCids = append(blsMsgCids, c)
|
||||
} else {
|
||||
c, err := sm.ChainStore().PutMessage(msg)
|
||||
c, err := filec.sm.ChainStore().PutMessage(msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -79,12 +77,12 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
|
||||
}
|
||||
}
|
||||
|
||||
store := sm.ChainStore().ActorStore(ctx)
|
||||
blsmsgroot, err := toArray(store, blsMsgCids)
|
||||
store := filec.sm.ChainStore().ActorStore(ctx)
|
||||
blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("building bls amt: %w", err)
|
||||
}
|
||||
secpkmsgroot, err := toArray(store, secpkMsgCids)
|
||||
secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("building secpk amt: %w", err)
|
||||
}
|
||||
@ -98,19 +96,19 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
|
||||
}
|
||||
next.Messages = mmcid
|
||||
|
||||
aggSig, err := aggregateSignatures(blsSigs)
|
||||
aggSig, err := consensus.AggregateSignatures(blsSigs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
next.BLSAggregate = aggSig
|
||||
pweight, err := sm.ChainStore().Weight(ctx, pts)
|
||||
pweight, err := filec.sm.ChainStore().Weight(ctx, pts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
next.ParentWeight = pweight
|
||||
|
||||
baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts)
|
||||
baseFee, err := filec.sm.ChainStore().ComputeBaseFee(ctx, pts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("computing base fee: %w", err)
|
||||
}
|
||||
@ -138,41 +136,3 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
|
||||
|
||||
return fullBlock, nil
|
||||
}
|
||||
|
||||
func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
|
||||
sigsS := make([]ffi.Signature, len(sigs))
|
||||
for i := 0; i < len(sigs); i++ {
|
||||
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
|
||||
}
|
||||
|
||||
aggSig := ffi.Aggregate(sigsS)
|
||||
if aggSig == nil {
|
||||
if len(sigs) > 0 {
|
||||
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
|
||||
}
|
||||
|
||||
zeroSig := ffi.CreateZeroSignature()
|
||||
|
||||
// Note: for blst this condition should not happen - nil should not
|
||||
// be returned
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: zeroSig[:],
|
||||
}, nil
|
||||
}
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: aggSig[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
|
||||
arr := blockadt.MakeEmptyArray(store)
|
||||
for i, c := range cids {
|
||||
oc := cbg.CborCid(c)
|
||||
if err := arr.Set(uint64(i), &oc); err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
}
|
||||
return arr.Root()
|
||||
}
|
@ -1,10 +1,12 @@
|
||||
package stmgr
|
||||
package filcns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"golang.org/x/xerrors"
|
||||
@ -13,6 +15,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/go-state-types/rt"
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
@ -31,15 +34,16 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
)
|
||||
|
||||
func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
var us UpgradeSchedule
|
||||
func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
|
||||
var us stmgr.UpgradeSchedule
|
||||
|
||||
updates := []Upgrade{{
|
||||
updates := []stmgr.Upgrade{{
|
||||
Height: build.UpgradeBreezeHeight,
|
||||
Network: network.Version1,
|
||||
Migration: UpgradeFaucetBurnRecovery,
|
||||
@ -88,7 +92,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradeTrustHeight,
|
||||
Network: network.Version10,
|
||||
Migration: UpgradeActorsV3,
|
||||
PreMigrations: []PreMigration{{
|
||||
PreMigrations: []stmgr.PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV3,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
@ -108,7 +112,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradeTurboHeight,
|
||||
Network: network.Version12,
|
||||
Migration: UpgradeActorsV4,
|
||||
PreMigrations: []PreMigration{{
|
||||
PreMigrations: []stmgr.PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV4,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
@ -124,7 +128,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradeHyperdriveHeight,
|
||||
Network: network.Version13,
|
||||
Migration: UpgradeActorsV5,
|
||||
PreMigrations: []PreMigration{{
|
||||
PreMigrations: []stmgr.PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV5,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
@ -135,7 +139,25 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
DontStartWithin: 15,
|
||||
StopWithin: 5,
|
||||
}},
|
||||
Expensive: true}}
|
||||
Expensive: true,
|
||||
}, {
|
||||
Height: build.UpgradeChocolateHeight,
|
||||
Network: network.Version14,
|
||||
Migration: UpgradeActorsV6,
|
||||
PreMigrations: []stmgr.PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV6,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
StopWithin: 35,
|
||||
}, {
|
||||
PreMigration: PreUpgradeActorsV6,
|
||||
StartWithin: 30,
|
||||
DontStartWithin: 15,
|
||||
StopWithin: 5,
|
||||
}},
|
||||
Expensive: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, u := range updates {
|
||||
if u.Height < 0 {
|
||||
@ -147,7 +169,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
return us
|
||||
}
|
||||
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, em stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Some initial parameters
|
||||
FundsForMiners := types.FromFil(1_000_000)
|
||||
LookbackEpoch := abi.ChainEpoch(32000)
|
||||
@ -249,7 +271,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
|
||||
// Execute transfers from previous step
|
||||
for _, t := range transfers {
|
||||
if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
|
||||
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
|
||||
}
|
||||
}
|
||||
@ -352,7 +374,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
}
|
||||
|
||||
for _, t := range transfersBack {
|
||||
if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
|
||||
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
|
||||
}
|
||||
}
|
||||
@ -362,7 +384,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
|
||||
}
|
||||
if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
|
||||
if err := stmgr.DoTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
|
||||
}
|
||||
|
||||
@ -378,7 +400,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
}
|
||||
|
||||
difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
|
||||
if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
|
||||
if err := stmgr.DoTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
|
||||
}
|
||||
|
||||
@ -400,14 +422,14 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
if em != nil {
|
||||
// record the transfer in execution traces
|
||||
|
||||
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
|
||||
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
|
||||
|
||||
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
|
||||
MessageReceipt: *makeFakeRct(),
|
||||
MessageReceipt: *stmgr.MakeFakeRct(),
|
||||
ActorErr: nil,
|
||||
ExecutionTrace: types.ExecutionTrace{
|
||||
Msg: fakeMsg,
|
||||
MsgRct: makeFakeRct(),
|
||||
MsgRct: stmgr.MakeFakeRct(),
|
||||
Error: "",
|
||||
Duration: 0,
|
||||
GasCharges: nil,
|
||||
@ -423,8 +445,8 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.cs.ActorStore(ctx)
|
||||
func UpgradeIgnition(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.ChainStore().ActorStore(ctx)
|
||||
|
||||
if build.UpgradeLiftoffHeight <= epoch {
|
||||
return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
|
||||
@ -440,7 +462,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
|
||||
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
|
||||
}
|
||||
|
||||
err = setNetworkName(ctx, store, tree, "ignition")
|
||||
err = stmgr.SetNetworkName(ctx, store, tree, "ignition")
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
|
||||
}
|
||||
@ -478,7 +500,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
if portions < 1 {
|
||||
return xerrors.Errorf("cannot split into 0 portions")
|
||||
}
|
||||
@ -553,7 +575,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
|
||||
}
|
||||
|
||||
for i < portions {
|
||||
keyAddr, err := makeKeyAddr(addr, i)
|
||||
keyAddr, err := stmgr.MakeKeyAddr(addr, i)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("creating key address: %w", err)
|
||||
}
|
||||
@ -568,7 +590,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
|
||||
return xerrors.Errorf("setting new msig actor state: %w", err)
|
||||
}
|
||||
|
||||
if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
|
||||
if err := stmgr.DoTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
|
||||
return xerrors.Errorf("transferring split msig balance: %w", err)
|
||||
}
|
||||
|
||||
@ -578,14 +600,14 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
|
||||
if em != nil {
|
||||
// record the transfer in execution traces
|
||||
|
||||
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
|
||||
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
|
||||
|
||||
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
|
||||
MessageReceipt: *makeFakeRct(),
|
||||
MessageReceipt: *stmgr.MakeFakeRct(),
|
||||
ActorErr: nil,
|
||||
ExecutionTrace: types.ExecutionTrace{
|
||||
Msg: fakeMsg,
|
||||
MsgRct: makeFakeRct(),
|
||||
MsgRct: stmgr.MakeFakeRct(),
|
||||
Error: "",
|
||||
Duration: 0,
|
||||
GasCharges: nil,
|
||||
@ -602,8 +624,8 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
|
||||
}
|
||||
|
||||
// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
|
||||
func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
|
||||
gb, err := sm.cs.GetGenesis()
|
||||
func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
|
||||
gb, err := sm.ChainStore().GetGenesis()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting genesis block: %w", err)
|
||||
}
|
||||
@ -613,7 +635,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store,
|
||||
return xerrors.Errorf("getting genesis tipset: %w", err)
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
|
||||
cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
|
||||
genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("loading state tree: %w", err)
|
||||
@ -683,9 +705,9 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St
|
||||
return nil
|
||||
}
|
||||
|
||||
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeRefuel(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
|
||||
store := sm.cs.ActorStore(ctx)
|
||||
store := sm.ChainStore().ActorStore(ctx)
|
||||
tree, err := sm.StateTree(root)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
|
||||
@ -709,8 +731,8 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
||||
func UpgradeActorsV2(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
info, err := store.Put(ctx, new(types.StateInfo0))
|
||||
@ -755,13 +777,13 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeLiftoff(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
tree, err := sm.StateTree(root)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
|
||||
}
|
||||
|
||||
err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
|
||||
err = stmgr.SetNetworkName(ctx, sm.ChainStore().ActorStore(ctx), tree, "mainnet")
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
|
||||
}
|
||||
@ -769,12 +791,12 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeCalico(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
if build.BuildType != build.BuildMainnet {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
store := sm.cs.ActorStore(ctx)
|
||||
store := sm.ChainStore().ActorStore(ctx)
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
|
||||
@ -815,7 +837,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
@ -839,7 +861,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
|
||||
}
|
||||
|
||||
if build.BuildType == build.BuildMainnet {
|
||||
err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
|
||||
err := stmgr.TerminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
|
||||
if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
|
||||
return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
|
||||
}
|
||||
@ -853,7 +875,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
func PreUpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
@ -867,11 +889,11 @@ func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCa
|
||||
}
|
||||
|
||||
func upgradeActorsV3Common(
|
||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
||||
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv10.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
||||
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
@ -917,7 +939,7 @@ func upgradeActorsV3Common(
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
@ -939,7 +961,7 @@ func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
func PreUpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
@ -953,11 +975,11 @@ func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCa
|
||||
}
|
||||
|
||||
func upgradeActorsV4Common(
|
||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
||||
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv12.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
||||
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
@ -1003,7 +1025,7 @@ func upgradeActorsV4Common(
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
@ -1025,7 +1047,7 @@ func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
func PreUpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
@ -1039,11 +1061,11 @@ func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCa
|
||||
}
|
||||
|
||||
func upgradeActorsV5Common(
|
||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
||||
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv13.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
|
||||
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
@ -1088,3 +1110,104 @@ func upgradeActorsV5Common(
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV6(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
workerCount = 1
|
||||
}
|
||||
|
||||
config := nv14.Config{
|
||||
MaxWorkers: uint(workerCount),
|
||||
JobQueueSize: 1000,
|
||||
ResultQueueSize: 100,
|
||||
ProgressLogPeriod: 10 * time.Second,
|
||||
}
|
||||
|
||||
newRoot, err := upgradeActorsV6Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV6(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
workerCount = 1
|
||||
} else {
|
||||
workerCount /= 2
|
||||
}
|
||||
config := nv14.Config{MaxWorkers: uint(workerCount)}
|
||||
_, err := upgradeActorsV6Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
return err
|
||||
}
|
||||
|
||||
func upgradeActorsV6Common(
|
||||
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv14.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
|
||||
}
|
||||
|
||||
if stateRoot.Version != types.StateTreeVersion4 {
|
||||
return cid.Undef, xerrors.Errorf(
|
||||
"expected state root version 4 for actors v6 upgrade, got %d",
|
||||
stateRoot.Version,
|
||||
)
|
||||
}
|
||||
|
||||
// Perform the migration
|
||||
newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
|
||||
}
|
||||
|
||||
// Persist the result.
|
||||
newRoot, err := store.Put(ctx, &types.StateRoot{
|
||||
Version: types.StateTreeVersion4,
|
||||
Actors: newHamtRoot,
|
||||
Info: stateRoot.Info,
|
||||
})
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
|
||||
}
|
||||
|
||||
// Persist the new tree.
|
||||
|
||||
{
|
||||
from := buf
|
||||
to := buf.Read()
|
||||
|
||||
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
type migrationLogger struct{}
|
||||
|
||||
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
|
||||
switch level {
|
||||
case rt.DEBUG:
|
||||
log.Debugf(msg, args...)
|
||||
case rt.INFO:
|
||||
log.Infof(msg, args...)
|
||||
case rt.WARN:
|
||||
log.Warnf(msg, args...)
|
||||
case rt.ERROR:
|
||||
log.Errorf(msg, args...)
|
||||
}
|
||||
}
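Every UpgradeActorsVn / PreUpgradeActorsVn pair above repeats the same worker-count heuristic: the full migration takes all CPUs minus three, the pre-migration takes half of them, and both fall back to a single worker on small machines. A minimal, self-contained sketch of that heuristic follows; the helper name is ours, not part of this diff.

```go
package main

import (
	"fmt"
	"runtime"
)

// migrationWorkerCounts mirrors the heuristic used by the upgrade functions:
// full migrations use NumCPU()-3 workers, pre-migrations use NumCPU()/2,
// and both use at least one worker.
func migrationWorkerCounts() (full, pre uint) {
	n := runtime.NumCPU()

	f := n - 3
	if f <= 0 {
		f = 1
	}

	p := 1
	if n > 4 {
		p = n / 2
	}

	return uint(f), uint(p)
}

func main() {
	full, pre := migrationWorkerCounts()
	fmt.Printf("migration workers: %d, pre-migration workers: %d\n", full, pre)
}
```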
|
@ -1,22 +1,25 @@
|
||||
package store
|
||||
package filcns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
|
||||
big2 "github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
big2 "github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
var zero = types.NewInt(0)
|
||||
|
||||
func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
|
||||
func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
|
||||
if ts == nil {
|
||||
return types.NewInt(0), nil
|
||||
}
|
||||
@ -28,7 +31,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
|
||||
|
||||
tpow := big2.Zero()
|
||||
{
|
||||
cst := cbor.NewCborStore(cs.StateBlockstore())
|
||||
cst := cbor.NewCborStore(stateBs)
|
||||
state, err := state.LoadStateTree(cst, ts.ParentState())
|
||||
if err != nil {
|
||||
return types.NewInt(0), xerrors.Errorf("load state tree: %w", err)
|
||||
@ -39,7 +42,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
|
||||
return types.NewInt(0), xerrors.Errorf("get power actor: %w", err)
|
||||
}
|
||||
|
||||
powState, err := power.Load(cs.ActorStore(ctx), act)
|
||||
powState, err := power.Load(store.ActorStore(ctx, stateBs), act)
|
||||
if err != nil {
|
||||
return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err)
|
||||
}
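With this change Weight is no longer a ChainStore method: it lives in the new filcns package and takes the state blockstore explicitly, decoupling weight calculation from the chain store. A hedged sketch of the new call shape; the wrapper function and the filcns import path are assumptions, not part of this diff.

```go
package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// tipsetWeight shows the new call shape: instead of cs.Weight(ctx, ts), the
// caller passes the state blockstore explicitly.
func tipsetWeight(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (types.BigInt, error) {
	w, err := filcns.Weight(ctx, cs.StateBlockstore(), ts)
	if err != nil {
		return types.NewInt(0), xerrors.Errorf("computing tipset weight: %w", err)
	}
	return w, nil
}
```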
|
chain/consensus/iface.go (new file, 19 lines)
@ -0,0 +1,19 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type Consensus interface {
|
||||
ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)
|
||||
ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string)
|
||||
IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
|
||||
|
||||
CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error)
|
||||
}
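The new Consensus interface is the seam that lets alternative consensus backends replace Filecoin expected consensus. The do-nothing stand-in below shows the full method set an implementation has to provide and how conformance can be checked at compile time; it is an illustrative sketch, not code from this change.

```go
package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/types"
)

// nullConsensus accepts every block and never mines; it only exists to show
// the shape of the interface.
type nullConsensus struct{}

func (nullConsensus) ValidateBlock(context.Context, *types.FullBlock) error { return nil }

func (nullConsensus) ValidateBlockPubsub(context.Context, bool, *pubsub.Message) (pubsub.ValidationResult, string) {
	return pubsub.ValidationAccept, ""
}

func (nullConsensus) IsEpochBeyondCurrMax(abi.ChainEpoch) bool { return false }

func (nullConsensus) CreateBlock(context.Context, api.Wallet, *api.BlockTemplate) (*types.FullBlock, error) {
	return nil, nil
}

// Compile-time conformance check.
var _ consensus.Consensus = nullConsensus{}
```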
|
chain/consensus/utils.go (new file, 83 lines)
@ -0,0 +1,83 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
)
|
||||
|
||||
var ErrTemporal = errors.New("temporal error")
|
||||
|
||||
func VerifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "syncer.VerifyBlsAggregate")
|
||||
defer span.End()
|
||||
span.AddAttributes(
|
||||
trace.Int64Attribute("msgCount", int64(len(msgs))),
|
||||
)
|
||||
|
||||
msgsS := make([]ffi.Message, len(msgs))
|
||||
pubksS := make([]ffi.PublicKey, len(msgs))
|
||||
for i := 0; i < len(msgs); i++ {
|
||||
msgsS[i] = msgs[i].Bytes()
|
||||
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
|
||||
}
|
||||
|
||||
sigS := new(ffi.Signature)
|
||||
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
|
||||
|
||||
if len(msgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
valid := ffi.HashVerify(sigS, msgsS, pubksS)
|
||||
if !valid {
|
||||
return xerrors.New("bls aggregate signature failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func AggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
|
||||
sigsS := make([]ffi.Signature, len(sigs))
|
||||
for i := 0; i < len(sigs); i++ {
|
||||
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
|
||||
}
|
||||
|
||||
aggSig := ffi.Aggregate(sigsS)
|
||||
if aggSig == nil {
|
||||
if len(sigs) > 0 {
|
||||
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
|
||||
}
|
||||
|
||||
zeroSig := ffi.CreateZeroSignature()
|
||||
|
||||
// Note: for blst this condition should not happen - nil should not
|
||||
// be returned
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: zeroSig[:],
|
||||
}, nil
|
||||
}
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: aggSig[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ToMessagesArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
|
||||
arr := blockadt.MakeEmptyArray(store)
|
||||
for i, c := range cids {
|
||||
oc := cbg.CborCid(c)
|
||||
if err := arr.Set(uint64(i), &oc); err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
}
|
||||
return arr.Root()
|
||||
}
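ToMessagesArray packs message CIDs into an AMT whose root goes into the block header. A hedged usage sketch with an in-memory store; the helper and its store setup are illustrative, not from this diff.

```go
package example

import (
	"context"

	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus"
)

// messagesRoot builds the messages AMT for a block out of the message CIDs.
// The in-memory blockstore here is only for illustration; real callers use
// the chain blockstore.
func messagesRoot(ctx context.Context, msgCids []cid.Cid) (cid.Cid, error) {
	bs := blockstore.NewMemory()
	store := blockadt.WrapStore(ctx, cbor.NewCborStore(bs))
	return consensus.ToMessagesArray(store, msgCids)
}
```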
|
chain/events/cache.go (new file, 33 lines)
@ -0,0 +1,33 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
)
|
||||
|
||||
type uncachedAPI interface {
|
||||
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
|
||||
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error)
|
||||
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
|
||||
|
||||
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
|
||||
}
|
||||
|
||||
type cache struct {
|
||||
*tipSetCache
|
||||
*messageCache
|
||||
uncachedAPI
|
||||
}
|
||||
|
||||
func newCache(api EventAPI, gcConfidence abi.ChainEpoch) *cache {
|
||||
return &cache{
|
||||
newTSCache(api, gcConfidence),
|
||||
newMessageCache(api),
|
||||
api,
|
||||
}
|
||||
}
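The new cache type works purely through Go embedding: the embedded tipSetCache and messageCache shadow the API methods they memoize, and every other EventAPI call falls through to the embedded uncachedAPI. A small self-contained sketch of that pattern, with illustrative names only:

```go
package main

import "fmt"

type backend struct{}

func (backend) Get(key string) string { return "backend:" + key }
func (backend) Status() string        { return "ok" }

// cached embeds backend: its own Get shadows backend.Get with a memoized
// version, while Status falls through to the embedded implementation. This
// is the same trick cache uses for ChainGetBlockMessages.
type cached struct {
	backend
	memo map[string]string
}

func (c *cached) Get(key string) string {
	if v, ok := c.memo[key]; ok {
		return v
	}
	v := c.backend.Get(key)
	c.memo[key] = v
	return v
}

func main() {
	c := &cached{memo: map[string]string{}}
	fmt.Println(c.Get("a"), c.Get("a"), c.Status())
}
```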
|
@ -2,18 +2,14 @@ package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
@ -25,209 +21,46 @@ type (
|
||||
RevertHandler func(ctx context.Context, ts *types.TipSet) error
|
||||
)
|
||||
|
||||
type heightHandler struct {
|
||||
confidence int
|
||||
called bool
|
||||
|
||||
handle HeightHandler
|
||||
revert RevertHandler
|
||||
// A TipSetObserver receives notifications of tipsets
|
||||
type TipSetObserver interface {
|
||||
Apply(ctx context.Context, from, to *types.TipSet) error
|
||||
Revert(ctx context.Context, from, to *types.TipSet) error
|
||||
}
|
||||
|
||||
type EventAPI interface {
|
||||
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
|
||||
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
|
||||
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
||||
ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
||||
ChainHead(context.Context) (*types.TipSet, error)
|
||||
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
|
||||
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
|
||||
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error)
|
||||
|
||||
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
|
||||
}
|
||||
|
||||
type Events struct {
|
||||
api EventAPI
|
||||
|
||||
tsc *tipSetCache
|
||||
lk sync.Mutex
|
||||
|
||||
ready chan struct{}
|
||||
readyOnce sync.Once
|
||||
|
||||
heightEvents
|
||||
*observer
|
||||
*heightEvents
|
||||
*hcEvents
|
||||
|
||||
observers []TipSetObserver
|
||||
}
|
||||
|
||||
func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) *Events {
|
||||
tsc := newTSCache(gcConfidence, api)
|
||||
func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
|
||||
cache := newCache(api, gcConfidence)
|
||||
|
||||
e := &Events{
|
||||
api: api,
|
||||
|
||||
tsc: tsc,
|
||||
|
||||
heightEvents: heightEvents{
|
||||
tsc: tsc,
|
||||
ctx: ctx,
|
||||
gcConfidence: gcConfidence,
|
||||
|
||||
heightTriggers: map[uint64]*heightHandler{},
|
||||
htTriggerHeights: map[abi.ChainEpoch][]uint64{},
|
||||
htHeights: map[abi.ChainEpoch][]uint64{},
|
||||
},
|
||||
|
||||
hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
|
||||
ready: make(chan struct{}),
|
||||
observers: []TipSetObserver{},
|
||||
ob := newObserver(cache, gcConfidence)
|
||||
if err := ob.start(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go e.listenHeadChanges(ctx)
|
||||
he := newHeightEvents(cache, ob, gcConfidence)
|
||||
headChange := newHCEvents(cache, ob)
|
||||
|
||||
// Wait for the first tipset to be seen or bail if shutting down
|
||||
select {
|
||||
case <-e.ready:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return e
|
||||
return &Events{ob, he, headChange}, nil
|
||||
}
|
||||
|
||||
func NewEvents(ctx context.Context, api EventAPI) *Events {
|
||||
func NewEvents(ctx context.Context, api EventAPI) (*Events, error) {
|
||||
gcConfidence := 2 * build.ForkLengthThreshold
|
||||
return NewEventsWithConfidence(ctx, api, gcConfidence)
|
||||
}
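Both constructors now return an error because the observer is started, and the first head fetched, during construction rather than in a background goroutine, so callers must handle the failure. A hedged sketch of the new call shape; the wrapper is illustrative.

```go
package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/events"
)

// startEvents wraps the new constructor: it can fail (for example if the
// initial ChainNotify call fails), unlike the old fire-and-forget version.
func startEvents(ctx context.Context, api events.EventAPI) (*events.Events, error) {
	ev, err := events.NewEvents(ctx, api)
	if err != nil {
		return nil, xerrors.Errorf("starting chain events: %w", err)
	}
	return ev, nil
}
```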
|
||||
|
||||
func (e *Events) listenHeadChanges(ctx context.Context) {
|
||||
for {
|
||||
if err := e.listenHeadChangesOnce(ctx); err != nil {
|
||||
log.Errorf("listen head changes errored: %s", err)
|
||||
} else {
|
||||
log.Warn("listenHeadChanges quit")
|
||||
}
|
||||
select {
|
||||
case <-build.Clock.After(time.Second):
|
||||
case <-ctx.Done():
|
||||
log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("restarting listenHeadChanges")
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
notifs, err := e.api.ChainNotify(ctx)
|
||||
if err != nil {
|
||||
// Retry is handled by caller
|
||||
return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err)
|
||||
}
|
||||
|
||||
var cur []*api.HeadChange
|
||||
var ok bool
|
||||
|
||||
// Wait for first tipset or bail
|
||||
select {
|
||||
case cur, ok = <-notifs:
|
||||
if !ok {
|
||||
return xerrors.Errorf("notification channel closed")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if len(cur) != 1 {
|
||||
return xerrors.Errorf("unexpected initial head notification length: %d", len(cur))
|
||||
}
|
||||
|
||||
if cur[0].Type != store.HCCurrent {
|
||||
return xerrors.Errorf("expected first head notification type to be 'current', was '%s'", cur[0].Type)
|
||||
}
|
||||
|
||||
if err := e.tsc.add(cur[0].Val); err != nil {
|
||||
log.Warnf("tsc.add: adding current tipset failed: %v", err)
|
||||
}
|
||||
|
||||
e.readyOnce.Do(func() {
|
||||
e.lastTs = cur[0].Val
|
||||
// Signal that we have seen first tipset
|
||||
close(e.ready)
|
||||
})
|
||||
|
||||
for notif := range notifs {
|
||||
var rev, app []*types.TipSet
|
||||
for _, notif := range notif {
|
||||
switch notif.Type {
|
||||
case store.HCRevert:
|
||||
rev = append(rev, notif.Val)
|
||||
case store.HCApply:
|
||||
app = append(app, notif.Val)
|
||||
default:
|
||||
log.Warnf("unexpected head change notification type: '%s'", notif.Type)
|
||||
}
|
||||
}
|
||||
|
||||
if err := e.headChange(ctx, rev, app); err != nil {
|
||||
log.Warnf("headChange failed: %s", err)
|
||||
}
|
||||
|
||||
// sync with fake chainstore (for tests)
|
||||
if fcs, ok := e.api.(interface{ notifDone() }); ok {
|
||||
fcs.notifDone()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Events) headChange(ctx context.Context, rev, app []*types.TipSet) error {
|
||||
if len(app) == 0 {
|
||||
return xerrors.New("events.headChange expected at least one applied tipset")
|
||||
}
|
||||
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
|
||||
if err := e.headChangeAt(rev, app); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := e.observeChanges(ctx, rev, app); err != nil {
|
||||
return err
|
||||
}
|
||||
return e.processHeadChangeEvent(rev, app)
|
||||
}
|
||||
|
||||
// A TipSetObserver receives notifications of tipsets
|
||||
type TipSetObserver interface {
|
||||
Apply(ctx context.Context, ts *types.TipSet) error
|
||||
Revert(ctx context.Context, ts *types.TipSet) error
|
||||
}
|
||||
|
||||
// TODO: add a confidence level so we can have observers with difference levels of confidence
|
||||
func (e *Events) Observe(obs TipSetObserver) error {
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
e.observers = append(e.observers, obs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// observeChanges expects caller to hold e.lk
|
||||
func (e *Events) observeChanges(ctx context.Context, rev, app []*types.TipSet) error {
|
||||
for _, ts := range rev {
|
||||
for _, o := range e.observers {
|
||||
_ = o.Revert(ctx, ts)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ts := range app {
|
||||
for _, o := range e.observers {
|
||||
_ = o.Apply(ctx, ts)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -5,9 +5,6 @@ import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -35,7 +32,7 @@ type eventData interface{}
|
||||
// `prevTs` is the previous tipset, eg the "from" tipset for a state change.
|
||||
// `ts` is the event tipset, eg the tipset in which the `msg` is included.
|
||||
// `curH`-`ts.Height` = `confidence`
|
||||
type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
|
||||
type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
|
||||
|
||||
// CheckFunc is used for atomicity guarantees. If the condition the callbacks
|
||||
// wait for has already happened in tipset `ts`
|
||||
@ -43,7 +40,7 @@ type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainE
|
||||
// If `done` is true, timeout won't be triggered
|
||||
// If `more` is false, no messages will be sent to EventHandler (RevertHandler
|
||||
// may still be called)
|
||||
type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error)
|
||||
type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error)
|
||||
|
||||
// Keep track of information for an event handler
|
||||
type handlerInfo struct {
|
||||
@ -60,10 +57,9 @@ type handlerInfo struct {
|
||||
// until the required confidence is reached
|
||||
type queuedEvent struct {
|
||||
trigger triggerID
|
||||
data eventData
|
||||
|
||||
prevH abi.ChainEpoch
|
||||
h abi.ChainEpoch
|
||||
data eventData
|
||||
prevTipset, tipset *types.TipSet
|
||||
|
||||
called bool
|
||||
}
|
||||
@ -71,19 +67,17 @@ type queuedEvent struct {
|
||||
// Manages chain head change events, which may be forward (new tipset added to
|
||||
// chain) or backward (chain branch discarded in favour of heavier branch)
|
||||
type hcEvents struct {
|
||||
cs EventAPI
|
||||
tsc *tipSetCache
|
||||
ctx context.Context
|
||||
gcConfidence uint64
|
||||
cs EventAPI
|
||||
|
||||
lk sync.Mutex
|
||||
lastTs *types.TipSet
|
||||
|
||||
lk sync.Mutex
|
||||
|
||||
ctr triggerID
|
||||
|
||||
// TODO: get rid of trigger IDs and just use pointers as keys.
|
||||
triggers map[triggerID]*handlerInfo
|
||||
|
||||
// TODO: instead of scheduling events in the future, look at the chain in the past. We can skip the "confidence" queue entirely.
|
||||
// maps block heights to events
|
||||
// [triggerH][msgH][event]
|
||||
confQueue map[triggerH]map[msgH][]*queuedEvent
|
||||
@ -98,83 +92,77 @@ type hcEvents struct {
|
||||
watcherEvents
|
||||
}
|
||||
|
||||
func newHCEvents(ctx context.Context, cs EventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
|
||||
e := hcEvents{
|
||||
ctx: ctx,
|
||||
cs: cs,
|
||||
tsc: tsc,
|
||||
gcConfidence: gcConfidence,
|
||||
|
||||
func newHCEvents(api EventAPI, obs *observer) *hcEvents {
|
||||
e := &hcEvents{
|
||||
cs: api,
|
||||
confQueue: map[triggerH]map[msgH][]*queuedEvent{},
|
||||
revertQueue: map[msgH][]triggerH{},
|
||||
triggers: map[triggerID]*handlerInfo{},
|
||||
timeouts: map[abi.ChainEpoch]map[triggerID]int{},
|
||||
}
|
||||
|
||||
e.messageEvents = newMessageEvents(ctx, &e, cs)
|
||||
e.watcherEvents = newWatcherEvents(ctx, &e, cs)
|
||||
e.messageEvents = newMessageEvents(e, api)
|
||||
e.watcherEvents = newWatcherEvents(e, api)
|
||||
|
||||
return &e
|
||||
// We need to take the lock as the observer could immediately try calling us.
|
||||
e.lk.Lock()
|
||||
e.lastTs = obs.Observe((*hcEventsObserver)(e))
|
||||
e.lk.Unlock()
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
// Called when there is a change to the head with tipsets to be
|
||||
// reverted / applied
|
||||
func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {
|
||||
type hcEventsObserver hcEvents
|
||||
|
||||
func (e *hcEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
|
||||
for _, ts := range rev {
|
||||
e.handleReverts(ts)
|
||||
e.lastTs = ts
|
||||
defer func() { e.lastTs = to }()
|
||||
|
||||
// Check if the head change caused any state changes that we were
|
||||
// waiting for
|
||||
stateChanges := e.checkStateChanges(from, to)
|
||||
|
||||
// Queue up calls until there have been enough blocks to reach
|
||||
// confidence on the state changes
|
||||
for tid, data := range stateChanges {
|
||||
e.queueForConfidence(tid, data, from, to)
|
||||
}
|
||||
|
||||
for _, ts := range app {
|
||||
// Check if the head change caused any state changes that we were
|
||||
// waiting for
|
||||
stateChanges := e.watcherEvents.checkStateChanges(e.lastTs, ts)
|
||||
// Check if the head change included any new message calls
|
||||
newCalls := e.checkNewCalls(ctx, from, to)
|
||||
|
||||
// Queue up calls until there have been enough blocks to reach
|
||||
// confidence on the state changes
|
||||
for tid, data := range stateChanges {
|
||||
e.queueForConfidence(tid, data, e.lastTs, ts)
|
||||
// Queue up calls until there have been enough blocks to reach
|
||||
// confidence on the message calls
|
||||
for tid, calls := range newCalls {
|
||||
for _, data := range calls {
|
||||
e.queueForConfidence(tid, data, nil, to)
|
||||
}
|
||||
|
||||
// Check if the head change included any new message calls
|
||||
newCalls, err := e.messageEvents.checkNewCalls(ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Queue up calls until there have been enough blocks to reach
|
||||
// confidence on the message calls
|
||||
for tid, calls := range newCalls {
|
||||
for _, data := range calls {
|
||||
e.queueForConfidence(tid, data, nil, ts)
|
||||
}
|
||||
}
|
||||
|
||||
for at := e.lastTs.Height(); at <= ts.Height(); at++ {
|
||||
// Apply any queued events and timeouts that were targeted at the
|
||||
// current chain height
|
||||
e.applyWithConfidence(ts, at)
|
||||
e.applyTimeouts(ts)
|
||||
}
|
||||
|
||||
// Update the latest known tipset
|
||||
e.lastTs = ts
|
||||
}
|
||||
|
||||
for at := from.Height() + 1; at <= to.Height(); at++ {
|
||||
// Apply any queued events and timeouts that were targeted at the
|
||||
// current chain height
|
||||
e.applyWithConfidence(ctx, at)
|
||||
e.applyTimeouts(ctx, at, to)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *hcEvents) handleReverts(ts *types.TipSet) {
|
||||
reverts, ok := e.revertQueue[ts.Height()]
|
||||
func (e *hcEventsObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
|
||||
defer func() { e.lastTs = to }()
|
||||
|
||||
reverts, ok := e.revertQueue[from.Height()]
|
||||
if !ok {
|
||||
return // nothing to do
|
||||
return nil // nothing to do
|
||||
}
|
||||
|
||||
for _, triggerH := range reverts {
|
||||
toRevert := e.confQueue[triggerH][ts.Height()]
|
||||
toRevert := e.confQueue[triggerH][from.Height()]
|
||||
for _, event := range toRevert {
|
||||
if !event.called {
|
||||
continue // event wasn't apply()-ied yet
|
||||
@ -182,24 +170,21 @@ func (e *hcEvents) handleReverts(ts *types.TipSet) {
|
||||
|
||||
trigger := e.triggers[event.trigger]
|
||||
|
||||
if err := trigger.revert(e.ctx, ts); err != nil {
|
||||
log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", ts.Height(), triggerH, err)
|
||||
if err := trigger.revert(ctx, from); err != nil {
|
||||
log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", from.Height(), triggerH, err)
|
||||
}
|
||||
}
|
||||
delete(e.confQueue[triggerH], ts.Height())
|
||||
delete(e.confQueue[triggerH], from.Height())
|
||||
}
|
||||
delete(e.revertQueue, ts.Height())
|
||||
delete(e.revertQueue, from.Height())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Queue up events until the chain has reached a height that reflects the
|
||||
// desired confidence
|
||||
func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) {
|
||||
func (e *hcEventsObserver) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) {
|
||||
trigger := e.triggers[trigID]
|
||||
|
||||
prevH := NoHeight
|
||||
if prevTs != nil {
|
||||
prevH = prevTs.Height()
|
||||
}
|
||||
appliedH := ts.Height()
|
||||
|
||||
triggerH := appliedH + abi.ChainEpoch(trigger.confidence)
|
||||
@ -211,28 +196,23 @@ func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts
|
||||
}
|
||||
|
||||
byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{
|
||||
trigger: trigID,
|
||||
prevH: prevH,
|
||||
h: appliedH,
|
||||
data: data,
|
||||
trigger: trigID,
|
||||
data: data,
|
||||
tipset: ts,
|
||||
prevTipset: prevTs,
|
||||
})
|
||||
|
||||
e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH)
|
||||
}
|
||||
|
||||
// Apply any events that were waiting for this chain height for confidence
|
||||
func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) {
|
||||
func (e *hcEventsObserver) applyWithConfidence(ctx context.Context, height abi.ChainEpoch) {
|
||||
byOrigH, ok := e.confQueue[height]
|
||||
if !ok {
|
||||
return // no triggers at this height
|
||||
}
|
||||
|
||||
for origH, events := range byOrigH {
|
||||
triggerTs, err := e.tsc.get(origH)
|
||||
if err != nil {
|
||||
log.Errorf("events: applyWithConfidence didn't find tipset for event; wanted %d; current %d", origH, height)
|
||||
}
|
||||
|
||||
for _, event := range events {
|
||||
if event.called {
|
||||
continue
|
||||
@ -243,18 +223,7 @@ func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch)
|
||||
continue
|
||||
}
|
||||
|
||||
// Previous tipset - this is relevant for example in a state change
|
||||
// from one tipset to another
|
||||
var prevTs *types.TipSet
|
||||
if event.prevH != NoHeight {
|
||||
prevTs, err = e.tsc.get(event.prevH)
|
||||
if err != nil {
|
||||
log.Errorf("events: applyWithConfidence didn't find tipset for previous event; wanted %d; current %d", event.prevH, height)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
more, err := trigger.handle(event.data, prevTs, triggerTs, height)
|
||||
more, err := trigger.handle(ctx, event.data, event.prevTipset, event.tipset, height)
|
||||
if err != nil {
|
||||
log.Errorf("chain trigger (@H %d, triggered @ %d) failed: %s", origH, height, err)
|
||||
continue // don't revert failed calls
|
||||
@ -273,8 +242,8 @@ func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch)
|
||||
}
|
||||
|
||||
// Apply any timeouts that expire at this height
|
||||
func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
|
||||
triggers, ok := e.timeouts[ts.Height()]
|
||||
func (e *hcEventsObserver) applyTimeouts(ctx context.Context, at abi.ChainEpoch, ts *types.TipSet) {
|
||||
triggers, ok := e.timeouts[at]
|
||||
if !ok {
|
||||
return // nothing to do
|
||||
}
|
||||
@ -288,14 +257,15 @@ func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
|
||||
continue
|
||||
}
|
||||
|
||||
timeoutTs, err := e.tsc.get(ts.Height() - abi.ChainEpoch(trigger.confidence))
|
||||
// This should be cached.
|
||||
timeoutTs, err := e.cs.ChainGetTipSetAfterHeight(ctx, at-abi.ChainEpoch(trigger.confidence), ts.Key())
|
||||
if err != nil {
|
||||
log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", ts.Height()-abi.ChainEpoch(trigger.confidence), ts.Height())
|
||||
log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", at-abi.ChainEpoch(trigger.confidence), at)
|
||||
}
|
||||
|
||||
more, err := trigger.handle(nil, nil, timeoutTs, ts.Height())
|
||||
more, err := trigger.handle(ctx, nil, nil, timeoutTs, at)
|
||||
if err != nil {
|
||||
log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTs.Height(), ts.Height(), err)
|
||||
log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTs.Height(), at, err)
|
||||
continue // don't revert failed calls
|
||||
}
|
||||
|
||||
@ -309,24 +279,19 @@ func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
|
||||
// - RevertHandler: called if the chain head changes causing the event to revert
|
||||
// - confidence: wait this many tipsets before calling EventHandler
|
||||
// - timeout: at this chain height, timeout on waiting for this event
|
||||
func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) {
|
||||
func (e *hcEvents) onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) {
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
|
||||
// Check if the event has already occurred
|
||||
ts, err := e.tsc.best()
|
||||
done, more, err := check(ctx, e.lastTs)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("error getting best tipset: %w", err)
|
||||
}
|
||||
done, more, err := check(ts)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err)
|
||||
return 0, xerrors.Errorf("called check error (h: %d): %w", e.lastTs.Height(), err)
|
||||
}
|
||||
if done {
|
||||
timeout = NoTimeout
|
||||
}
|
||||
|
||||
// Create a trigger for the event
|
||||
id := e.ctr
|
||||
e.ctr++
|
||||
|
||||
@ -354,12 +319,11 @@ func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHa
|
||||
// headChangeAPI is used to allow the composed event APIs to call back to hcEvents
|
||||
// to listen for changes
|
||||
type headChangeAPI interface {
|
||||
onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error)
|
||||
onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error)
|
||||
}
|
||||
|
||||
// watcherEvents watches for a state change
|
||||
type watcherEvents struct {
|
||||
ctx context.Context
|
||||
cs EventAPI
|
||||
hcAPI headChangeAPI
|
||||
|
||||
@ -367,9 +331,8 @@ type watcherEvents struct {
|
||||
matchers map[triggerID]StateMatchFunc
|
||||
}
|
||||
|
||||
func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) watcherEvents {
|
||||
func newWatcherEvents(hcAPI headChangeAPI, cs EventAPI) watcherEvents {
|
||||
return watcherEvents{
|
||||
ctx: ctx,
|
||||
cs: cs,
|
||||
hcAPI: hcAPI,
|
||||
matchers: make(map[triggerID]StateMatchFunc),
|
||||
@ -425,7 +388,7 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
|
||||
// * `StateChangeHandler` is called when the specified state change was observed
|
||||
// on-chain, and a confidence threshold was reached, or the specified `timeout`
|
||||
// height was reached with no state change observed. When this callback is
|
||||
// invoked on a timeout, `oldState` and `newState` are set to nil.
|
||||
// invoked on a timeout, `oldTs` and `states` are set to nil.
|
||||
// This callback returns a boolean specifying whether further notifications
|
||||
// should be sent, like `more` return param from `CheckFunc` above.
|
||||
//
|
||||
@ -438,7 +401,7 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
|
||||
// the state change is queued up until the confidence interval has elapsed (and
|
||||
// `StateChangeHandler` is called)
|
||||
func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
|
||||
hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
|
||||
hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
|
||||
states, ok := data.(StateChange)
|
||||
if data != nil && !ok {
|
||||
panic("expected StateChange")
|
||||
@ -447,7 +410,7 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
|
||||
return scHnd(prevTs, ts, states, height)
|
||||
}
|
||||
|
||||
id, err := we.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
|
||||
id, err := we.hcAPI.onHeadChanged(context.TODO(), check, hnd, rev, confidence, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -461,43 +424,29 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
|
||||
|
||||
// messageEvents watches for message calls to actors
|
||||
type messageEvents struct {
|
||||
ctx context.Context
|
||||
cs EventAPI
|
||||
hcAPI headChangeAPI
|
||||
|
||||
lk sync.RWMutex
|
||||
matchers map[triggerID]MsgMatchFunc
|
||||
|
||||
blockMsgLk sync.Mutex
|
||||
blockMsgCache *lru.ARCCache
|
||||
}
|
||||
|
||||
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
|
||||
blsMsgCache, _ := lru.NewARC(500)
|
||||
func newMessageEvents(hcAPI headChangeAPI, cs EventAPI) messageEvents {
|
||||
return messageEvents{
|
||||
ctx: ctx,
|
||||
cs: cs,
|
||||
hcAPI: hcAPI,
|
||||
matchers: make(map[triggerID]MsgMatchFunc),
|
||||
blockMsgLk: sync.Mutex{},
|
||||
blockMsgCache: blsMsgCache,
|
||||
cs: cs,
|
||||
hcAPI: hcAPI,
|
||||
matchers: make(map[triggerID]MsgMatchFunc),
|
||||
}
|
||||
}
|
||||
|
||||
// Check if there are any new actor calls
|
||||
func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) {
|
||||
pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
|
||||
if err != nil {
|
||||
log.Errorf("getting parent tipset in checkNewCalls: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (me *messageEvents) checkNewCalls(ctx context.Context, from, to *types.TipSet) map[triggerID][]eventData {
|
||||
me.lk.RLock()
|
||||
defer me.lk.RUnlock()
|
||||
|
||||
// For each message in the tipset
|
||||
res := make(map[triggerID][]eventData)
|
||||
me.messagesForTs(pts, func(msg *types.Message) {
|
||||
me.messagesForTs(from, func(msg *types.Message) {
|
||||
// TODO: provide receipts
|
||||
|
||||
// Run each trigger's matcher against the message
|
||||
@ -516,47 +465,32 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventD
|
||||
}
|
||||
})
|
||||
|
||||
return res, nil
|
||||
return res
|
||||
}
|
||||
|
||||
// Get the messages in a tipset
|
||||
func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) {
|
||||
seen := map[cid.Cid]struct{}{}
|
||||
|
||||
for _, tsb := range ts.Blocks() {
|
||||
me.blockMsgLk.Lock()
|
||||
msgsI, ok := me.blockMsgCache.Get(tsb.Cid())
|
||||
var err error
|
||||
if !ok {
|
||||
msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
|
||||
if err != nil {
|
||||
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
|
||||
// this is quite bad, but probably better than missing all the other updates
|
||||
me.blockMsgLk.Unlock()
|
||||
continue
|
||||
}
|
||||
me.blockMsgCache.Add(tsb.Cid(), msgsI)
|
||||
for i, tsb := range ts.Cids() {
|
||||
msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb)
|
||||
if err != nil {
|
||||
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s",
|
||||
ts.Height(), tsb, ts.Blocks()[i].Messages, err)
|
||||
continue
|
||||
}
|
||||
me.blockMsgLk.Unlock()
|
||||
msgs := msgsI.(*api.BlockMessages)
|
||||
for _, m := range msgs.BlsMessages {
|
||||
_, ok := seen[m.Cid()]
|
||||
for i, c := range msgs.Cids {
|
||||
// We iterate over the CIDs to avoid having to recompute them.
|
||||
_, ok := seen[c]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
seen[m.Cid()] = struct{}{}
|
||||
|
||||
consume(m)
|
||||
}
|
||||
|
||||
for _, m := range msgs.SecpkMessages {
|
||||
_, ok := seen[m.Message.Cid()]
|
||||
if ok {
|
||||
continue
|
||||
seen[c] = struct{}{}
|
||||
if i < len(msgs.BlsMessages) {
|
||||
consume(msgs.BlsMessages[i])
|
||||
} else {
|
||||
consume(&msgs.SecpkMessages[i-len(msgs.BlsMessages)].Message)
|
||||
}
|
||||
seen[m.Message.Cid()] = struct{}{}
|
||||
|
||||
consume(&m.Message)
|
||||
}
|
||||
}
|
||||
}
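The rewritten messagesForTs iterates over BlockMessages.Cids so message CIDs never have to be recomputed; this relies on the invariant that the Cids slice lists the BLS message CIDs first, followed by the secp message CIDs. A hedged sketch of that index mapping; the helper is illustrative.

```go
package example

import (
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// messageAt selects the message behind BlockMessages.Cids[i]: indices below
// len(BlsMessages) refer to BLS messages, the rest to secp messages.
func messageAt(bm *api.BlockMessages, i int) *types.Message {
	if i < len(bm.BlsMessages) {
		return bm.BlsMessages[i]
	}
	return &bm.SecpkMessages[i-len(bm.BlsMessages)].Message
}
```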
|
||||
@ -596,14 +530,14 @@ type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
|
||||
// * `MsgMatchFunc` is called against each message. If there is a match, the
|
||||
// message is queued up until the confidence interval has elapsed (and
|
||||
// `MsgHandler` is called)
|
||||
func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
|
||||
hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
|
||||
func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
|
||||
hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
|
||||
msg, ok := data.(*types.Message)
|
||||
if data != nil && !ok {
|
||||
panic("expected msg")
|
||||
}
|
||||
|
||||
ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
|
||||
ml, err := me.cs.StateSearchMsg(ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -615,7 +549,7 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
|
||||
return msgHnd(msg, &ml.Receipt, ts, height)
|
||||
}
|
||||
|
||||
id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
|
||||
id, err := me.hcAPI.onHeadChanged(ctx, check, hnd, rev, confidence, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -629,5 +563,5 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
|
||||
|
||||
// Convenience function for checking and matching messages
|
||||
func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error {
|
||||
return me.Called(me.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage()))
|
||||
return me.Called(ctx, me.CheckMsg(msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage()))
|
||||
}
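Called and CalledMsg now take the caller's context and thread it through to the handlers instead of capturing one at construction time. A hedged usage sketch for waiting on a message with five tipsets of confidence; the handler bodies and the confidence value are placeholders.

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

// waitForMsg registers a one-shot callback for smsg landing on chain.
func waitForMsg(ctx context.Context, ev *events.Events, smsg types.ChainMsg, timeout abi.ChainEpoch) error {
	handler := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
		// msg and rec are nil if the timeout height was reached first.
		return false, nil // false: no further notifications wanted
	}
	revert := func(ctx context.Context, ts *types.TipSet) error {
		// The chain reorged below the call; undo any side effects here.
		return nil
	}
	return ev.CalledMsg(ctx, handler, revert, 5, timeout, smsg)
}
```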
|
||||
|
@ -11,199 +11,235 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type heightEvents struct {
|
||||
lk sync.Mutex
|
||||
tsc *tipSetCache
|
||||
gcConfidence abi.ChainEpoch
|
||||
type heightHandler struct {
|
||||
ts *types.TipSet
|
||||
height abi.ChainEpoch
|
||||
called bool
|
||||
|
||||
ctr triggerID
|
||||
|
||||
heightTriggers map[triggerID]*heightHandler
|
||||
|
||||
htTriggerHeights map[triggerH][]triggerID
|
||||
htHeights map[msgH][]triggerID
|
||||
|
||||
ctx context.Context
|
||||
handle HeightHandler
|
||||
revert RevertHandler
|
||||
}
|
||||
|
||||
func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
|
||||
ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
|
||||
span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
|
||||
span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
|
||||
type heightEvents struct {
|
||||
api EventAPI
|
||||
gcConfidence abi.ChainEpoch
|
||||
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
for _, ts := range rev {
|
||||
// TODO: log error if h below gcconfidence
|
||||
// revert height-based triggers
|
||||
lk sync.Mutex
|
||||
head *types.TipSet
|
||||
tsHeights, triggerHeights map[abi.ChainEpoch][]*heightHandler
|
||||
lastGc abi.ChainEpoch //nolint:structcheck
|
||||
}
|
||||
|
||||
revert := func(h abi.ChainEpoch, ts *types.TipSet) {
|
||||
for _, tid := range e.htHeights[h] {
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||
|
||||
rev := e.heightTriggers[tid].revert
|
||||
e.lk.Unlock()
|
||||
err := rev(ctx, ts)
|
||||
e.lk.Lock()
|
||||
e.heightTriggers[tid].called = false
|
||||
|
||||
span.End()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("reverting chain trigger (@H %d): %s", h, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
revert(ts.Height(), ts)
|
||||
|
||||
subh := ts.Height() - 1
|
||||
for {
|
||||
cts, err := e.tsc.get(subh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cts != nil {
|
||||
break
|
||||
}
|
||||
|
||||
revert(subh, ts)
|
||||
subh--
|
||||
}
|
||||
|
||||
if err := e.tsc.revert(ts); err != nil {
|
||||
return err
|
||||
}
|
||||
func newHeightEvents(api EventAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents {
|
||||
he := &heightEvents{
|
||||
api: api,
|
||||
gcConfidence: gcConfidence,
|
||||
tsHeights: map[abi.ChainEpoch][]*heightHandler{},
|
||||
triggerHeights: map[abi.ChainEpoch][]*heightHandler{},
|
||||
}
|
||||
|
||||
for i := range app {
|
||||
ts := app[i]
|
||||
|
||||
if err := e.tsc.add(ts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// height triggers
|
||||
|
||||
apply := func(h abi.ChainEpoch, ts *types.TipSet) error {
|
||||
for _, tid := range e.htTriggerHeights[h] {
|
||||
hnd := e.heightTriggers[tid]
|
||||
if hnd.called {
|
||||
return nil
|
||||
}
|
||||
|
||||
triggerH := h - abi.ChainEpoch(hnd.confidence)
|
||||
|
||||
incTs, err := e.tsc.getNonNull(triggerH)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", false))
|
||||
handle := hnd.handle
|
||||
e.lk.Unlock()
|
||||
err = handle(ctx, incTs, h)
|
||||
e.lk.Lock()
|
||||
hnd.called = true
|
||||
span.End()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", triggerH, ts.Height(), err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := apply(ts.Height(), ts); err != nil {
|
||||
return err
|
||||
}
|
||||
subh := ts.Height() - 1
|
||||
for {
|
||||
cts, err := e.tsc.get(subh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cts != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if err := apply(subh, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subh--
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
he.lk.Lock()
|
||||
he.head = obs.Observe((*heightEventsObserver)(he))
|
||||
he.lk.Unlock()
|
||||
return he
|
||||
}
|
||||
|
||||
// ChainAt invokes the specified `HeightHandler` when the chain reaches the
|
||||
// specified height+confidence threshold. If the chain is rolled-back under the
|
||||
// specified height, `RevertHandler` will be called.
|
||||
//
|
||||
// ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null
|
||||
func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
|
||||
e.lk.Lock() // Tricky locking, check your locks if you modify this function!
|
||||
|
||||
best, err := e.tsc.best()
|
||||
if err != nil {
|
||||
e.lk.Unlock()
|
||||
return xerrors.Errorf("error getting best tipset: %w", err)
|
||||
// ts passed to handlers is the tipset at the specified epoch, or above if lower tipsets were null.
|
||||
//
|
||||
// The context governs cancellations of this call, it won't cancel the event handler.
|
||||
func (e *heightEvents) ChainAt(ctx context.Context, hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
|
||||
if abi.ChainEpoch(confidence) > e.gcConfidence {
|
||||
// Need this to be able to GC effectively.
|
||||
return xerrors.Errorf("confidence cannot be greater than gcConfidence: %d > %d", confidence, e.gcConfidence)
|
||||
}
|
||||
|
||||
bestH := best.Height()
|
||||
if bestH >= h+abi.ChainEpoch(confidence) {
|
||||
ts, err := e.tsc.getNonNull(h)
|
||||
if err != nil {
|
||||
log.Warnf("events.ChainAt: calling HandleFunc with nil tipset, not found in cache: %s", err)
|
||||
}
|
||||
|
||||
e.lk.Unlock()
|
||||
ctx, span := trace.StartSpan(e.ctx, "events.HeightApply")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||
|
||||
err = hnd(ctx, ts, bestH)
|
||||
span.End()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e.lk.Lock()
|
||||
best, err = e.tsc.best()
|
||||
if err != nil {
|
||||
e.lk.Unlock()
|
||||
return xerrors.Errorf("error getting best tipset: %w", err)
|
||||
}
|
||||
bestH = best.Height()
|
||||
}
|
||||
|
||||
defer e.lk.Unlock()
|
||||
|
||||
if bestH >= h+abi.ChainEpoch(confidence)+e.gcConfidence {
|
||||
return nil
|
||||
}
|
||||
|
||||
triggerAt := h + abi.ChainEpoch(confidence)
|
||||
|
||||
id := e.ctr
|
||||
e.ctr++
|
||||
|
||||
e.heightTriggers[id] = &heightHandler{
|
||||
confidence: confidence,
|
||||
|
||||
handler := &heightHandler{
|
||||
height: h,
|
||||
handle: hnd,
|
||||
revert: rev,
|
||||
}
|
||||
triggerAt := h + abi.ChainEpoch(confidence)
|
||||
|
||||
e.htHeights[h] = append(e.htHeights[h], id)
|
||||
e.htTriggerHeights[triggerAt] = append(e.htTriggerHeights[triggerAt], id)
|
||||
// Here we try to jump onto a moving train. To avoid stopping the train, we release the lock
|
||||
// while calling the API and/or the trigger functions. Unfortunately, it's entirely possible
|
||||
// (although unlikely) to go back and forth across the trigger heights, so we need to keep
|
||||
// going back and forth here till we're synced.
|
||||
//
|
||||
// TODO: Consider using a worker goroutine so we can just drop the handler in a channel? The
|
||||
// downside is that we'd either need a tipset cache, or we'd need to potentially fetch
|
||||
// tipsets in-line inside the event loop.
|
||||
e.lk.Lock()
|
||||
for {
|
||||
head := e.head
|
||||
if head.Height() >= h {
|
||||
// Head is past the handler height. We at least need to stash the tipset to
|
||||
// avoid doing this from the main event loop.
|
||||
e.lk.Unlock()
|
||||
|
||||
var ts *types.TipSet
|
||||
if head.Height() == h {
|
||||
ts = head
|
||||
} else {
|
||||
var err error
|
||||
ts, err = e.api.ChainGetTipSetAfterHeight(ctx, handler.height, head.Key())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("events.ChainAt: failed to get tipset: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// If we've applied the handler on the wrong tipset, revert.
|
||||
if handler.called && !ts.Equals(handler.ts) {
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||
err := handler.revert(ctx, handler.ts)
|
||||
span.End()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
handler.called = false
|
||||
}
|
||||
|
||||
// Save the tipset.
|
||||
handler.ts = ts
|
||||
|
||||
// If we've reached confidence and haven't called, call.
|
||||
if !handler.called && head.Height() >= triggerAt {
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||
err := handler.handle(ctx, handler.ts, head.Height())
|
||||
span.End()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handler.called = true
|
||||
|
||||
// If we've reached gcConfidence, return without saving anything.
|
||||
if head.Height() >= h+e.gcConfidence {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
e.lk.Lock()
|
||||
} else if handler.called {
|
||||
// We're not past the head (anymore) but have applied the handler. Revert, try again.
|
||||
e.lk.Unlock()
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", true))
|
||||
err := handler.revert(ctx, handler.ts)
|
||||
span.End()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
handler.called = false
|
||||
e.lk.Lock()
|
||||
} // otherwise, we changed heads but the change didn't matter.
|
||||
|
||||
// If we managed to get through this without the head changing, we're finally done.
|
||||
if head.Equals(e.head) {
|
||||
e.triggerHeights[triggerAt] = append(e.triggerHeights[triggerAt], handler)
|
||||
e.tsHeights[h] = append(e.tsHeights[h], handler)
|
||||
e.lk.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
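ChainAt keeps its semantics but now takes the caller's context, which governs the registration itself (including the catch-up work in the loop above) rather than later event delivery. A hedged usage sketch; the confidence value and handler bodies are placeholders.

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

// notifyAtHeight asks for a callback once the chain is 10 epochs past h.
func notifyAtHeight(ctx context.Context, ev *events.Events, h abi.ChainEpoch) error {
	apply := func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
		// ts is the tipset at (or just above, for null rounds) height h.
		return nil
	}
	revert := func(ctx context.Context, ts *types.TipSet) error {
		// The chain was rolled back below h after apply ran.
		return nil
	}
	return ev.ChainAt(ctx, apply, revert, 10, h)
}
```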
|
||||
|
||||
// Updates the head and garbage collects if we're 2x over our garbage collection confidence period.
|
||||
func (e *heightEventsObserver) updateHead(h *types.TipSet) {
|
||||
e.lk.Lock()
|
||||
defer e.lk.Unlock()
|
||||
e.head = h
|
||||
|
||||
if e.head.Height() < e.lastGc+e.gcConfidence*2 {
|
||||
return
|
||||
}
|
||||
e.lastGc = h.Height()
|
||||
|
||||
targetGcHeight := e.head.Height() - e.gcConfidence
|
||||
for h := range e.tsHeights {
|
||||
if h >= targetGcHeight {
|
||||
continue
|
||||
}
|
||||
delete(e.tsHeights, h)
|
||||
}
|
||||
for h := range e.triggerHeights {
|
||||
if h >= targetGcHeight {
|
||||
continue
|
||||
}
|
||||
delete(e.triggerHeights, h)
|
||||
}
|
||||
}
|
||||
|
||||
type heightEventsObserver heightEvents
|
||||
|
||||
func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
|
||||
// Update the head first so we don't accidentally skip reverting a concurrent call to ChainAt.
|
||||
e.updateHead(to)
|
||||
|
||||
// Call revert on all heights between the two tipsets, handling empty tipsets.
|
||||
for h := from.Height(); h > to.Height(); h-- {
|
||||
e.lk.Lock()
|
||||
triggers := e.tsHeights[h]
|
||||
e.lk.Unlock()
|
||||
|
||||
// 1. Triggers are only invoked from the global event loop, we don't need to hold the lock while calling.
|
||||
// 2. We only ever append to or replace the trigger slice, so it's safe to iterate over it without the lock.
|
||||
for _, handler := range triggers {
|
||||
handler.ts = nil // invalidate
|
||||
if !handler.called {
|
||||
// We haven't triggered this yet, or there has been a concurrent call to ChainAt.
|
||||
continue
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
|
||||
err := handler.revert(ctx, from)
|
||||
span.End()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("reverting chain trigger (@H %d): %s", h, err)
|
||||
}
|
||||
handler.called = false
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *heightEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
|
||||
// Update the head first so we don't accidental skip applying a concurrent call to ChainAt.
|
||||
e.updateHead(to)
|
||||
|
||||
for h := from.Height() + 1; h <= to.Height(); h++ {
|
||||
e.lk.Lock()
|
||||
triggers := e.triggerHeights[h]
|
||||
tipsets := e.tsHeights[h]
|
||||
e.lk.Unlock()
|
||||
|
||||
// Stash the tipset for future triggers.
|
||||
for _, handler := range tipsets {
|
||||
handler.ts = to
|
||||
}
|
||||
|
||||
// Trigger the ready triggers.
|
||||
for _, handler := range triggers {
|
||||
if handler.called {
|
||||
// We may have reverted past the trigger point, but not past the call point.
|
||||
// Or there has been a concurrent call to ChainAt.
|
||||
continue
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
|
||||
span.AddAttributes(trace.BoolAttribute("immediate", false))
|
||||
err := handler.handle(ctx, handler.ts, h)
|
||||
span.End()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", h, to.Height(), err)
|
||||
}
|
||||
|
||||
handler.called = true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
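Editor's note: the apply/revert calls above imply handler functions of the shape handle(ctx, ts, curH) and revert(ctx, ts). The following is a minimal, self-contained sketch of that handler life-cycle, not part of this diff; the stand-in types (chainEpoch, tipSet) replace the real abi.ChainEpoch and *types.TipSet, and the exact struct layout is assumed.

package main

import (
	"context"
	"fmt"
)

// Stand-ins for abi.ChainEpoch and *types.TipSet (assumption, for illustration only).
type chainEpoch int64

type tipSet struct{ height chainEpoch }

// heightHandler mirrors the handler fields used above: a trigger height,
// an apply callback, a revert callback, and bookkeeping of the last tipset.
type heightHandler struct {
	height chainEpoch
	handle func(ctx context.Context, ts *tipSet, curH chainEpoch) error
	revert func(ctx context.Context, ts *tipSet) error
	called bool
	ts     *tipSet
}

func main() {
	h := &heightHandler{
		height: 100,
		handle: func(ctx context.Context, ts *tipSet, curH chainEpoch) error {
			fmt.Printf("applied at epoch %d (head %d)\n", ts.height, curH)
			return nil
		},
		revert: func(ctx context.Context, ts *tipSet) error {
			fmt.Printf("reverted tipset at epoch %d\n", ts.height)
			return nil
		},
	}

	ctx := context.Background()
	ts := &tipSet{height: 100}

	_ = h.handle(ctx, ts, 105) // confidence reached: apply
	h.called = true
	_ = h.revert(ctx, ts) // reorg past the tipset: revert
	h.called = false
}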
File diff suppressed because it is too large
42  chain/events/message_cache.go  Normal file
@ -0,0 +1,42 @@
package events

import (
	"context"
	"sync"

	"github.com/filecoin-project/lotus/api"
	lru "github.com/hashicorp/golang-lru"
	"github.com/ipfs/go-cid"
)

type messageCache struct {
	api EventAPI

	blockMsgLk    sync.Mutex
	blockMsgCache *lru.ARCCache
}

func newMessageCache(api EventAPI) *messageCache {
	blsMsgCache, _ := lru.NewARC(500)

	return &messageCache{
		api:           api,
		blockMsgCache: blsMsgCache,
	}
}

func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid) (*api.BlockMessages, error) {
	c.blockMsgLk.Lock()
	defer c.blockMsgLk.Unlock()

	msgsI, ok := c.blockMsgCache.Get(blkCid)
	var err error
	if !ok {
		msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid)
		if err != nil {
			return nil, err
		}
		c.blockMsgCache.Add(blkCid, msgsI)
	}
	return msgsI.(*api.BlockMessages), nil
}
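Editor's note: message_cache.go memoizes ChainGetBlockMessages per block CID behind a 500-entry ARC cache from hashicorp/golang-lru. Below is a minimal, standalone sketch of that same memoization pattern; the string keys and the fetch function are hypothetical, only the golang-lru ARC API is real.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, _ := lru.NewARC(500) // same size as blockMsgCache above

	fetches := 0
	fetch := func(key string) string {
		fetches++
		return "messages-for-" + key // stand-in for the API call
	}

	// get consults the ARC cache first and only falls through to fetch on a miss.
	get := func(key string) string {
		if v, ok := cache.Get(key); ok {
			return v.(string)
		}
		v := fetch(key)
		cache.Add(key, v)
		return v
	}

	get("blockA")
	get("blockA")                    // served from the ARC cache, no second fetch
	fmt.Println("fetches:", fetches) // 1
}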
255  chain/events/observer.go  Normal file
@ -0,0 +1,255 @@
package events

import (
	"context"
	"sync"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"go.opencensus.io/trace"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

type observer struct {
	api EventAPI

	gcConfidence abi.ChainEpoch

	ready chan struct{}

	lk        sync.Mutex
	head      *types.TipSet
	maxHeight abi.ChainEpoch
	observers []TipSetObserver
}

func newObserver(api *cache, gcConfidence abi.ChainEpoch) *observer {
	obs := &observer{
		api:          api,
		gcConfidence: gcConfidence,

		ready:     make(chan struct{}),
		observers: []TipSetObserver{},
	}
	obs.Observe(api.observer())
	return obs
}

func (o *observer) start(ctx context.Context) error {
	go o.listenHeadChanges(ctx)

	// Wait for the first tipset to be seen or bail if shutting down
	select {
	case <-o.ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (o *observer) listenHeadChanges(ctx context.Context) {
	for {
		if err := o.listenHeadChangesOnce(ctx); err != nil {
			log.Errorf("listen head changes errored: %s", err)
		} else {
			log.Warn("listenHeadChanges quit")
		}
		select {
		case <-build.Clock.After(time.Second):
		case <-ctx.Done():
			log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
			return
		}

		log.Info("restarting listenHeadChanges")
	}
}

func (o *observer) listenHeadChangesOnce(ctx context.Context) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	notifs, err := o.api.ChainNotify(ctx)
	if err != nil {
		// Retry is handled by caller
		return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err)
	}

	var cur []*api.HeadChange
	var ok bool

	// Wait for first tipset or bail
	select {
	case cur, ok = <-notifs:
		if !ok {
			return xerrors.Errorf("notification channel closed")
		}
	case <-ctx.Done():
		return ctx.Err()
	}

	if len(cur) != 1 {
		return xerrors.Errorf("unexpected initial head notification length: %d", len(cur))
	}

	if cur[0].Type != store.HCCurrent {
		return xerrors.Errorf("expected first head notification type to be 'current', was '%s'", cur[0].Type)
	}

	curHead := cur[0].Val

	o.lk.Lock()
	if o.head == nil {
		o.head = curHead
		close(o.ready)
	}
	startHead := o.head
	o.lk.Unlock()

	if !startHead.Equals(curHead) {
		changes, err := o.api.ChainGetPath(ctx, startHead.Key(), curHead.Key())
		if err != nil {
			return xerrors.Errorf("failed to get path from last applied tipset to head: %w", err)
		}

		if err := o.applyChanges(ctx, changes); err != nil {
			return xerrors.Errorf("failed catch-up head changes: %w", err)
		}
	}

	for changes := range notifs {
		if err := o.applyChanges(ctx, changes); err != nil {
			return err
		}
	}

	return nil
}

func (o *observer) applyChanges(ctx context.Context, changes []*api.HeadChange) error {
	// Used to wait for a prior notification round to finish (by tests)
	if len(changes) == 0 {
		return nil
	}

	var rev, app []*types.TipSet
	for _, changes := range changes {
		switch changes.Type {
		case store.HCRevert:
			rev = append(rev, changes.Val)
		case store.HCApply:
			app = append(app, changes.Val)
		default:
			log.Errorf("unexpected head change notification type: '%s'", changes.Type)
		}
	}

	if err := o.headChange(ctx, rev, app); err != nil {
		return xerrors.Errorf("failed to apply head changes: %w", err)
	}
	return nil
}

func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error {
	ctx, span := trace.StartSpan(ctx, "events.HeadChange")
	span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
	span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))

	o.lk.Lock()
	head := o.head
	o.lk.Unlock()

	defer func() {
		span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height())))
		span.End()
	}()

	// NOTE: bailing out here if the head isn't what we expected is fine. We'll re-start the
	// entire process and handle any strange reorgs.
	for i, from := range rev {
		if !from.Equals(head) {
			return xerrors.Errorf(
				"expected to revert %s (%d), reverting %s (%d)",
				head.Key(), head.Height(), from.Key(), from.Height(),
			)
		}
		var to *types.TipSet
		if i+1 < len(rev) {
			// If we have more reverts, the next revert is the next head.
			to = rev[i+1]
		} else {
			// At the end of the revert sequence, we need to look up the joint tipset
			// between the revert sequence and the apply sequence.
			var err error
			to, err = o.api.ChainGetTipSet(ctx, from.Parents())
			if err != nil {
				// Well, this sucks. We'll bail and restart.
				return xerrors.Errorf("failed to get tipset when reverting due to a SetHead: %w", err)
			}
		}

		// Get the current observers and atomically set the head.
		//
		// 1. We need to get the observers every time in case some registered/deregistered.
		// 2. We need to atomically set the head so new observers don't see events twice or
		//    skip them.
		o.lk.Lock()
		observers := o.observers
		o.head = to
		o.lk.Unlock()

		for _, obs := range observers {
			if err := obs.Revert(ctx, from, to); err != nil {
				log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err)
			}
		}

		if to.Height() < o.maxHeight-o.gcConfidence {
			log.Errorf("reverted past finality, from %d to %d", o.maxHeight, to.Height())
		}

		head = to
	}

	for _, to := range app {
		if to.Parents() != head.Key() {
			return xerrors.Errorf(
				"cannot apply %s (%d) with parents %s on top of %s (%d)",
				to.Key(), to.Height(), to.Parents(), head.Key(), head.Height(),
			)
		}

		o.lk.Lock()
		observers := o.observers
		o.head = to
		o.lk.Unlock()

		for _, obs := range observers {
			if err := obs.Apply(ctx, head, to); err != nil {
				log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err)
			}
		}
		if to.Height() > o.maxHeight {
			o.maxHeight = to.Height()
		}

		head = to
	}
	return nil
}

// Observe registers the observer, and returns the current tipset. The observer is guaranteed to
// observe events starting at this tipset.
//
// Returns nil if the observer hasn't started yet (but still registers).
func (o *observer) Observe(obs TipSetObserver) *types.TipSet {
	o.lk.Lock()
	defer o.lk.Unlock()
	o.observers = append(o.observers, obs)
	return o.head
}
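Editor's note: the observer fans head changes out to every registered TipSetObserver via the Apply/Revert pair shown above. The sketch below is not part of this diff; it only mirrors those method shapes with a stand-in tipset type (the real interface takes *types.TipSet).

package main

import (
	"context"
	"fmt"
)

// Stand-in for *types.TipSet (assumption, for illustration only).
type tipSet struct{ height int64 }

// tipSetObserver mirrors the Apply/Revert shape used by the observer above.
type tipSetObserver interface {
	Apply(ctx context.Context, from, to *tipSet) error
	Revert(ctx context.Context, from, to *tipSet) error
}

// logObserver is a trivial consumer that just logs each transition.
type logObserver struct{}

func (logObserver) Apply(_ context.Context, from, to *tipSet) error {
	fmt.Printf("apply %d -> %d\n", from.height, to.height)
	return nil
}

func (logObserver) Revert(_ context.Context, from, to *tipSet) error {
	fmt.Printf("revert %d -> %d\n", from.height, to.height)
	return nil
}

func main() {
	var obs tipSetObserver = logObserver{}
	ctx := context.Background()
	_ = obs.Apply(ctx, &tipSet{height: 10}, &tipSet{height: 11})
	_ = obs.Revert(ctx, &tipSet{height: 11}, &tipSet{height: 10})
}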
@ -11,7 +11,9 @@ import (
)

type tsCacheAPI interface {
	ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
	ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
	ChainHead(context.Context) (*types.TipSet, error)
}

@ -20,61 +22,157 @@ type tsCacheAPI interface {
type tipSetCache struct {
	mu sync.RWMutex

	cache []*types.TipSet
	start int
	len   int
	byKey    map[types.TipSetKey]*types.TipSet
	byHeight []*types.TipSet
	start    int // chain head (end)
	len      int

	storage tsCacheAPI
}

func newTSCache(cap abi.ChainEpoch, storage tsCacheAPI) *tipSetCache {
func newTSCache(storage tsCacheAPI, cap abi.ChainEpoch) *tipSetCache {
	return &tipSetCache{
		cache: make([]*types.TipSet, cap),
		start: 0,
		len:   0,
		byKey:    make(map[types.TipSetKey]*types.TipSet, cap),
		byHeight: make([]*types.TipSet, cap),
		start:    0,
		len:      0,

		storage: storage,
	}
}
func (tsc *tipSetCache) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
	if ts, ok := tsc.byKey[tsk]; ok {
		return ts, nil
	}
	return tsc.storage.ChainGetTipSet(ctx, tsk)
}

func (tsc *tipSetCache) add(ts *types.TipSet) error {
func (tsc *tipSetCache) ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
	return tsc.get(ctx, height, tsk, true)
}

func (tsc *tipSetCache) ChainGetTipSetAfterHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
	return tsc.get(ctx, height, tsk, false)
}

func (tsc *tipSetCache) get(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey, prev bool) (*types.TipSet, error) {
	fallback := tsc.storage.ChainGetTipSetAfterHeight
	if prev {
		fallback = tsc.storage.ChainGetTipSetByHeight
	}
	tsc.mu.RLock()

	// Nothing in the cache?
	if tsc.len == 0 {
		tsc.mu.RUnlock()
		log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
		return fallback(ctx, height, tsk)
	}

	// Resolve the head.
	head := tsc.byHeight[tsc.start]
	if !tsk.IsEmpty() {
		// Not on this chain?
		var ok bool
		head, ok = tsc.byKey[tsk]
		if !ok {
			tsc.mu.RUnlock()
			return fallback(ctx, height, tsk)
		}
	}

	headH := head.Height()
	tailH := headH - abi.ChainEpoch(tsc.len)

	if headH == height {
		tsc.mu.RUnlock()
		return head, nil
	} else if headH < height {
		tsc.mu.RUnlock()
		// If the user doesn't pass a tsk, we assume "head" is the last tipset we processed.
		return nil, xerrors.Errorf("requested epoch is in the future")
	} else if height < tailH {
		log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tailH)
		tsc.mu.RUnlock()
		return fallback(ctx, height, head.Key())
	}

	direction := 1
	if prev {
		direction = -1
	}
	var ts *types.TipSet
	for i := 0; i < tsc.len && ts == nil; i += direction {
		ts = tsc.byHeight[normalModulo(tsc.start-int(headH-height)+i, len(tsc.byHeight))]
	}
	tsc.mu.RUnlock()
	return ts, nil
}

func (tsc *tipSetCache) ChainHead(ctx context.Context) (*types.TipSet, error) {
	tsc.mu.RLock()
	best := tsc.byHeight[tsc.start]
	tsc.mu.RUnlock()
	if best == nil {
		return tsc.storage.ChainHead(ctx)
	}
	return best, nil
}

func (tsc *tipSetCache) add(to *types.TipSet) error {
	tsc.mu.Lock()
	defer tsc.mu.Unlock()

	if tsc.len > 0 {
		if tsc.cache[tsc.start].Height() >= ts.Height() {
			return xerrors.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.cache[tsc.start].Height()+1, ts.Height())
		best := tsc.byHeight[tsc.start]
		if best.Height() >= to.Height() {
			return xerrors.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.byHeight[tsc.start].Height()+1, to.Height())
		}
		if best.Key() != to.Parents() {
			return xerrors.Errorf(
				"tipSetCache.add: expected new tipset %s (%d) to follow %s (%d), its parents are %s",
				to.Key(), to.Height(), best.Key(), best.Height(), best.Parents(),
			)
		}
	}

	nextH := ts.Height()
	nextH := to.Height()
	if tsc.len > 0 {
		nextH = tsc.cache[tsc.start].Height() + 1
		nextH = tsc.byHeight[tsc.start].Height() + 1
	}

	// fill null blocks
	for nextH != ts.Height() {
		tsc.start = normalModulo(tsc.start+1, len(tsc.cache))
		tsc.cache[tsc.start] = nil
		if tsc.len < len(tsc.cache) {
	for nextH != to.Height() {
		tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight))
		was := tsc.byHeight[tsc.start]
		if was != nil {
			tsc.byHeight[tsc.start] = nil
			delete(tsc.byKey, was.Key())
		}
		if tsc.len < len(tsc.byHeight) {
			tsc.len++
		}
		nextH++
	}

	tsc.start = normalModulo(tsc.start+1, len(tsc.cache))
	tsc.cache[tsc.start] = ts
	if tsc.len < len(tsc.cache) {
	tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight))
	was := tsc.byHeight[tsc.start]
	if was != nil {
		delete(tsc.byKey, was.Key())
	}
	tsc.byHeight[tsc.start] = to
	if tsc.len < len(tsc.byHeight) {
		tsc.len++
	}
	tsc.byKey[to.Key()] = to
	return nil
}

func (tsc *tipSetCache) revert(ts *types.TipSet) error {
func (tsc *tipSetCache) revert(from *types.TipSet) error {
	tsc.mu.Lock()
	defer tsc.mu.Unlock()

	return tsc.revertUnlocked(ts)
	return tsc.revertUnlocked(from)
}

func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error {
@ -82,75 +180,35 @@ func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error {
		return nil // this can happen, and it's fine
	}

	if !tsc.cache[tsc.start].Equals(ts) {
	was := tsc.byHeight[tsc.start]

	if !was.Equals(ts) {
		return xerrors.New("tipSetCache.revert: revert tipset didn't match cache head")
	}
	delete(tsc.byKey, was.Key())

	tsc.cache[tsc.start] = nil
	tsc.start = normalModulo(tsc.start-1, len(tsc.cache))
	tsc.byHeight[tsc.start] = nil
	tsc.start = normalModulo(tsc.start-1, len(tsc.byHeight))
	tsc.len--

	_ = tsc.revertUnlocked(nil) // revert null block gap
	return nil
}

func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error) {
	for {
		ts, err := tsc.get(height)
		if err != nil {
			return nil, err
		}
		if ts != nil {
			return ts, nil
		}
		height++
	}
func (tsc *tipSetCache) observer() TipSetObserver {
	return (*tipSetCacheObserver)(tsc)
}

func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {
	tsc.mu.RLock()
type tipSetCacheObserver tipSetCache

	if tsc.len == 0 {
		tsc.mu.RUnlock()
		log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
		return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, types.EmptyTSK)
	}
var _ TipSetObserver = new(tipSetCacheObserver)

	headH := tsc.cache[tsc.start].Height()

	if height > headH {
		tsc.mu.RUnlock()
		return nil, xerrors.Errorf("tipSetCache.get: requested tipset not in cache (req: %d, cache head: %d)", height, headH)
	}

	clen := len(tsc.cache)
	var tail *types.TipSet
	for i := 1; i <= tsc.len; i++ {
		tail = tsc.cache[normalModulo(tsc.start-tsc.len+i, clen)]
		if tail != nil {
			break
		}
	}

	if height < tail.Height() {
		tsc.mu.RUnlock()
		log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height())
		return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, tail.Key())
	}

	ts := tsc.cache[normalModulo(tsc.start-int(headH-height), clen)]
	tsc.mu.RUnlock()
	return ts, nil
func (tsc *tipSetCacheObserver) Apply(_ context.Context, _, to *types.TipSet) error {
	return (*tipSetCache)(tsc).add(to)
}

func (tsc *tipSetCache) best() (*types.TipSet, error) {
	tsc.mu.RLock()
	best := tsc.cache[tsc.start]
	tsc.mu.RUnlock()
	if best == nil {
		return tsc.storage.ChainHead(context.TODO())
	}
	return best, nil
func (tsc *tipSetCacheObserver) Revert(ctx context.Context, from, _ *types.TipSet) error {
	return (*tipSetCache)(tsc).revert(from)
}

func normalModulo(n, m int) int {
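Editor's note: the body of normalModulo is cut off by the diff above. The ring-buffer indexing in tipSetCache needs n % m to stay within [0, m) even for negative n, so a non-negative modulo of the following shape is assumed; this sketch is illustrative, not taken from the diff.

package main

import "fmt"

// normalModulo returns n modulo m, normalized to the range [0, m).
// Go's % operator can return a negative result for negative n, which
// would be an invalid slice index for the cache's circular buffer.
func normalModulo(n, m int) int {
	return ((n % m) + m) % m
}

func main() {
	fmt.Println(normalModulo(-1, 5)) // 4, whereas -1 % 5 == -1 in Go
	fmt.Println(normalModulo(7, 5))  // 2
}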
Some files were not shown because too many files have changed in this diff