Merge branch 'bloxico/system-test-matrix' of https://github.com/filecoin-project/lotus into merge_lotus
Commit: dda1a42a2a
@@ -805,6 +805,11 @@ workflows:
       suite: itest-deals_padding
       target: "./itests/deals_padding_test.go"
 
+  - test:
+      name: test-itest-deals_partial_retrieval_dm-level
+      suite: itest-deals_partial_retrieval_dm-level
+      target: "./itests/deals_partial_retrieval_dm-level_test.go"
+
   - test:
       name: test-itest-deals_partial_retrieval
       suite: itest-deals_partial_retrieval
@@ -890,6 +895,11 @@ workflows:
       suite: itest-sector_terminate
       target: "./itests/sector_terminate_test.go"
 
+  - test:
+      name: test-itest-self_sent_txn
+      suite: itest-self_sent_txn
+      target: "./itests/self_sent_txn_test.go"
+
   - test:
       name: test-itest-tape
      suite: itest-tape
@@ -971,19 +981,10 @@ workflows:
        tags:
          only:
            - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-  - build-appimage:
-      filters:
-        branches:
-          ignore:
-            - /.*/
-        tags:
-          only:
-            - /^v\d+\.\d+\.\d+(-rc\d+)?$/
   - publish:
       requires:
         - build-all
         - build-macos
-        - build-appimage
       filters:
         branches:
           ignore:
@@ -816,19 +816,10 @@ workflows:
        tags:
          only:
            - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-  - build-appimage:
-      filters:
-        branches:
-          ignore:
-            - /.*/
-        tags:
-          only:
-            - /^v\d+\.\d+\.\d+(-rc\d+)?$/
   - publish:
       requires:
         - build-all
         - build-macos
-        - build-appimage
       filters:
         branches:
           ignore:
.github/pull_request_template.md (new file, 21 lines)
@@ -0,0 +1,21 @@
+## Related Issues
+<!-- link all issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made.-->
+
+## Proposed Changes
+<!-- provide a clear list of the changes being made-->
+
+
+## Additional Info
+<!-- callouts, links to documentation, and etc-->
+
+## Checklist
+
+Before you mark the PR ready for review, please make sure that:
+- [ ] All commits have a clear commit message.
+- [ ] The PR title is in the form of of `<PR type>: <#issue number> <area>: <change being made>`
+  - example: ` fix: #1234 mempool: Introduce a cache for valid signatures`
+  - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _misc_,_perf_, _refactor_, _revert_, _style_, _test_
+  - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_
+- [ ] This PR has tests for new functionality or change in behaviour
+- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
+- [ ] CI is green
CHANGELOG.md (83 additions)
@@ -1,5 +1,88 @@
 # Lotus changelog
 
+# v1.13.1 / 2021-11-26
+
+This is an optional Lotus v1.13.1 release.
+
+## New Features
+- Shed: Add a util to find miner based on peerid ([filecoin-project/lotus#7544](https://github.com/filecoin-project/lotus/pull/7544))
+- Collect and expose graphsync metrics ([filecoin-project/lotus#7542](https://github.com/filecoin-project/lotus/pull/7542))
+- Shed: Add a util to find the most recent null tipset ([filecoin-project/lotus#7456](https://github.com/filecoin-project/lotus/pull/7456))
+
+## Improvements
+- Show prepared tasks in sealing jobs ([filecoin-project/lotus#7527](https://github.com/filecoin-project/lotus/pull/7527))
+- To make Deep happy ([filecoin-project/lotus#7546](https://github.com/filecoin-project/lotus/pull/7546))
+- Expose per-state sector counts on the prometheus endpoint ([filecoin-project/lotus#7541](https://github.com/filecoin-project/lotus/pull/7541))
+- Add storage-id flag to proving check ([filecoin-project/lotus#7479](https://github.com/filecoin-project/lotus/pull/7479))
+- FilecoinEC: Improve a log message ([filecoin-project/lotus#7499](https://github.com/filecoin-project/lotus/pull/7499))
+- itests: retry deal when control addr is out of funds ([filecoin-project/lotus#7454](https://github.com/filecoin-project/lotus/pull/7454))
+- Normlize selector use within lotus ([filecoin-project/lotus#7467](https://github.com/filecoin-project/lotus/pull/7467))
+- sealing: Improve scheduling of ready work ([filecoin-project/lotus#7335](https://github.com/filecoin-project/lotus/pull/7335))
+- Remove dead example code + dep ([filecoin-project/lotus#7466](https://github.com/filecoin-project/lotus/pull/7466))
+
+## Bug Fixes
+- fix the withdrawn amount unit ([filecoin-project/lotus#7563](https://github.com/filecoin-project/lotus/pull/7563))
+- rename vm#make{=>Account}Actor(). ([filecoin-project/lotus#7562](https://github.com/filecoin-project/lotus/pull/7562))
+- Fix used sector space accounting after AddPieceFailed ([filecoin-project/lotus#7530](https://github.com/filecoin-project/lotus/pull/7530))
+- Don't remove sector data when moving data into a shared path ([filecoin-project/lotus#7494](https://github.com/filecoin-project/lotus/pull/7494))
+- fix: support node instantiation in external packages ([filecoin-project/lotus#7511](https://github.com/filecoin-project/lotus/pull/7511))
+- Stop adding Jennifer's $HOME to lotus docs ([filecoin-project/lotus#7477](https://github.com/filecoin-project/lotus/pull/7477))
+- Bugfix: Use correct startup network versions ([filecoin-project/lotus#7486](https://github.com/filecoin-project/lotus/pull/7486))
+- Dep upgrade pass ([filecoin-project/lotus#7478](https://github.com/filecoin-project/lotus/pull/7478))
+- Remove obsolete GS testplan - it now lives in go-graphsync ([filecoin-project/lotus#7469](https://github.com/filecoin-project/lotus/pull/7469))
+- sealing: Recover sectors after failed AddPiece ([filecoin-project/lotus#7444](https://github.com/filecoin-project/lotus/pull/7444))
+
+## Dependency Updates
+- Update go-graphsync v0.10.1 ([filecoin-project/lotus#7457](https://github.com/filecoin-project/lotus/pull/7457))
+- update to proof v10.1.0 ([filecoin-project/lotus#7564](https://github.com/filecoin-project/lotus/pull/7564))
+- github.com/filecoin-project/specs-actors/v6 (v6.0.0 -> v6.0.1):
+- github.com/filecoin-project/go-jsonrpc (v0.1.4-0.20210217175800-45ea43ac2bec -> v0.1.5):
+- github.com/filecoin-project/go-fil-markets (v1.13.1 -> v1.13.3):
+- github.com/filecoin-project/go-data-transfer (v1.11.1 -> v1.11.4):
+- github.com/filecoin-project/go-crypto (v0.0.0-20191218222705-effae4ea9f03 -> v0.0.1):
+- github.com/filecoin-project/go-commp-utils (v0.1.1-0.20210427191551-70bf140d31c7 -> v0.1.2):
+- github.com/filecoin-project/go-cbor-util (v0.0.0-20191219014500-08c40a1e63a2 -> v0.0.1):
+- github.com/filecoin-project/go-address (v0.0.5 -> v0.0.6):
+- unpin the yamux dependency ([filecoin-project/lotus#7532](https://github.com/filecoin-project/lotus/pull/7532))
+- peerstore@v0.2.9 was withdrawn, let's not depend on it directly ([filecoin-project/lotus#7481](https://github.com/filecoin-project/lotus/pull/7481))
+- chore(deps): use tagged github.com/ipld/go-ipld-selector-text-lite ([filecoin-project/lotus#7464](https://github.com/filecoin-project/lotus/pull/7464))
+- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7473](https://github.com/filecoin-project/lotus/pull/7473))
+
+## Others
+- fix the changelog ([filecoin-project/lotus#7594](https://github.com/filecoin-project/lotus/pull/7594))
+- v1.13.1-rc2 prep ([filecoin-project/lotus#7593](https://github.com/filecoin-project/lotus/pull/7593))
+- lotus v1.13.1-rc1 ([filecoin-project/lotus#7569](https://github.com/filecoin-project/lotus/pull/7569))
+- misc: back-port v1.13.0 back to master ([filecoin-project/lotus#7537](https://github.com/filecoin-project/lotus/pull/7537))
+- Inline codegen ([filecoin-project/lotus#7495](https://github.com/filecoin-project/lotus/pull/7495))
+- releases -> master ([filecoin-project/lotus#7507](https://github.com/filecoin-project/lotus/pull/7507))
+- Make chocolate back to master ([filecoin-project/lotus#7493](https://github.com/filecoin-project/lotus/pull/7493))
+- restore filters for the build-macos job ([filecoin-project/lotus#7455](https://github.com/filecoin-project/lotus/pull/7455))
+- bump master to v1.13.1-dev ([filecoin-project/lotus#7451](https://github.com/filecoin-project/lotus/pull/7451))
+
+Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| @magik6k | 27 | +1285/-531 | 76 |
+| @ribasushi | 7 | +265/-1635 | 21 |
+| @raulk | 2 | +2/-737 | 13 |
+| @nonsens | 4 | +391/-21 | 19 |
+| @arajasek | 6 | +216/-23 | 14 |
+| @jennijuju | 8 | +102/-37 | 29 |
+| Steven Allen | 2 | +77/-29 | 6 |
+| @jennijuju | 4 | +19/-18 | 11 |
+| @dirkmc | 2 | +9/-9 | 4 |
+| @coryschwartz | 1 | +16/-2 | 2 |
+| @frrist | 1 | +12/-0 | 2 |
+| @Kubuxu | 5 | +5/-5 | 5 |
+| @hunjixin | 2 | +6/-3 | 2 |
+| @vyzo | 1 | +3/-3 | 2 |
+| @rvagg | 1 | +3/-3 | 2 |
+| @hannahhoward | 1 | +3/-2 | 2 |
+| Marten Seemann | 1 | +3/-0 | 1 |
+| @ZenGround0 | 1 | +1/-1 | 1 |
+
+
 # v1.13.0 / 2021-10-18
 
 Lotus v1.13.0 is a *highly recommended* feature release for all lotus users(i.e: storage providers, data brokers, application developers and so on) that supports the upcoming
@@ -36,7 +36,7 @@ WORKDIR /opt/filecoin
 ARG RUSTFLAGS=""
 ARG GOFLAGS=""
 
-RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway
+RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats
 
 
 FROM ubuntu:20.04 AS base
@@ -66,8 +66,6 @@ COPY scripts/docker-lotus-entrypoint.sh /
 
 ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
 ENV LOTUS_PATH /var/lib/lotus
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
 ENV DOCKER_LOTUS_IMPORT_WALLET ""
 
@@ -92,8 +90,6 @@ MAINTAINER Lotus Development Team
 COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
 
 ENV WALLET_PATH /var/lib/lotus-wallet
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
 
 RUN mkdir /var/lib/lotus-wallet
 RUN chown fc: /var/lib/lotus-wallet
@@ -114,10 +110,6 @@ MAINTAINER Lotus Development Team
 
 COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
 
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
-ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
-
 USER fc
 
 EXPOSE 1234
@@ -135,11 +127,7 @@ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
 COPY scripts/docker-lotus-miner-entrypoint.sh /
 
 ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
-ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
 ENV LOTUS_MINER_PATH /var/lib/lotus-miner
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
-ENV DOCKER_LOTUS_MINER_INIT true
 
 RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
 RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
@@ -163,10 +151,7 @@ MAINTAINER Lotus Development Team
 COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
 
 ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
-ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
 ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
 
 RUN mkdir /var/lib/lotus-worker
 RUN chown fc: /var/lib/lotus-worker
@@ -186,16 +171,11 @@ CMD ["-help"]
 from base as lotus-all-in-one
 
 ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
-ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
-ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
-ENV LOTUS_JAEGER_AGENT_PORT 6831
 ENV LOTUS_MINER_PATH /var/lib/lotus-miner
 ENV LOTUS_PATH /var/lib/lotus
 ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
-ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
 ENV WALLET_PATH /var/lib/lotus-wallet
 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
-ENV DOCKER_LOTUS_MINER_INIT true
 
 COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
 COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
@@ -203,6 +183,7 @@ COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
 COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
 COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
 COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
+COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/
 
 RUN mkdir /var/tmp/filecoin-proof-parameters
 RUN mkdir /var/lib/lotus
@@ -7,7 +7,6 @@ import (
     "time"
 
     "github.com/ipfs/go-cid"
-    textselector "github.com/ipld/go-ipld-selector-text-lite"
     "github.com/libp2p/go-libp2p-core/peer"
 
     "github.com/filecoin-project/go-address"
@@ -28,7 +27,6 @@ import (
     "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
     "github.com/filecoin-project/lotus/chain/actors/builtin/power"
     "github.com/filecoin-project/lotus/chain/types"
-    marketevents "github.com/filecoin-project/lotus/markets/loggers"
     "github.com/filecoin-project/lotus/node/modules/dtypes"
     "github.com/filecoin-project/lotus/node/repo/imports"
 )
@@ -352,10 +350,11 @@ type FullNode interface {
     // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
     ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
     // ClientRetrieve initiates the retrieval of a file, as specified in the order.
-    ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin
-    // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
-    // of status updates.
-    ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
+    ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin
+    // ClientRetrieveWait waits for retrieval to be complete
+    ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin
+    // ClientExport exports a file stored in the local filestore to a system file
+    ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin
     // ClientListRetrievals returns information about retrievals made by the local client
     ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
     // ClientGetRetrievalUpdates returns status of updated retrieval deals
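Note: the three FullNode methods above replace the old blocking retrieval call. ClientRetrieve now only starts a retrieval deal, ClientRetrieveWait blocks until that deal completes, and ClientExport writes the retrieved data out of the local store. The sketch below is illustrative only and is not part of this commit; the import path is the lotus api package, and the RestrievalRes.DealID, ExportRef and FileRef field names are assumptions rather than something this hunk defines.

```go
package main

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// retrieveToFile sketches the new three-step flow: start the retrieval,
// wait for the deal to complete, then export the data to a local file.
// Assumes a connected api.FullNode; error handling is kept minimal.
func retrieveToFile(ctx context.Context, node api.FullNode, order api.RetrievalOrder, outPath string) error {
	// Start the retrieval; unlike the old ClientRetrieve this returns
	// with the retrieval deal ID instead of blocking until the data is fetched.
	res, err := node.ClientRetrieve(ctx, order)
	if err != nil {
		return err
	}

	// Block until the retrieval deal finishes (replaces ClientRetrieveWithEvents).
	if err := node.ClientRetrieveWait(ctx, res.DealID); err != nil {
		return err
	}

	// Export the retrieved data from the local store to a plain file.
	return node.ClientExport(ctx,
		api.ExportRef{Root: order.Root, DealID: res.DealID}, // assumed field names
		api.FileRef{Path: outPath},
	)
}
```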
@@ -630,10 +629,14 @@ type FullNode interface {
     // <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
     MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
 
+    // MsigCancel cancels a previously-proposed multisig message
+    // It takes the following params: <multisig address>, <proposed transaction ID> <signer address>
+    MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
+
     // MsigCancel cancels a previously-proposed multisig message
     // It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
     // <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
-    MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
+    MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
 
     // MsigAddPropose proposes adding a signer in the multisig
     // It takes the following params: <multisig address>, <sender address of the propose msg>,
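Note: the old eight-argument MsigCancel is renamed to MsigCancelTxnHash (it re-supplies the proposed message so its hash can be checked), while the new four-argument MsigCancel cancels purely by transaction ID. A hedged sketch of the two call shapes, using only the signatures from this hunk (the surrounding wiring and argument values are placeholders):

```go
package main

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// cancelByID uses the new, simpler form: cancel proposal txnID on the msig,
// signed by proposer. No details of the proposed message are needed.
func cancelByID(ctx context.Context, node api.FullNode, msig, proposer address.Address, txnID uint64) (*api.MessagePrototype, error) {
	return node.MsigCancel(ctx, msig, txnID, proposer)
}

// cancelWithHashCheck uses the hash-checked form (previously named MsigCancel):
// the recipient, value, method and params of the proposed message are passed
// again so the node can verify they match the pending transaction.
func cancelWithHashCheck(ctx context.Context, node api.FullNode, msig, proposer, to address.Address, txnID uint64, value types.BigInt, method uint64, params []byte) (*api.MessagePrototype, error) {
	return node.MsigCancelTxnHash(ctx, msig, txnID, to, value, proposer, method, params)
}
```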
@@ -930,15 +933,14 @@ type MarketDeal struct {
 }
 
 type RetrievalOrder struct {
-    // TODO: make this less unixfs specific
-    Root cid.Cid
-    Piece *cid.Cid
-    DatamodelPathSelector *textselector.Expression
-    Size uint64
+    Root cid.Cid
+    Piece *cid.Cid
+    DataSelector *Selector
 
-    FromLocalCAR string // if specified, get data from a local CARv2 file.
-    // TODO: support offset
-    Total types.BigInt
+    // todo: Size/Total are only used for calculating price per byte; we should let users just pass that
+    Size uint64
+    Total types.BigInt
+
     UnsealPrice types.BigInt
     PaymentInterval uint64
     PaymentIntervalIncrease uint64
@@ -31,6 +31,8 @@ import (
 type Gateway interface {
     ChainHasObj(context.Context, cid.Cid) (bool, error)
     ChainHead(ctx context.Context) (*types.TipSet, error)
+    ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
+    ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
     ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
     ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
     ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error)
@@ -39,6 +41,7 @@ type Gateway interface {
     ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
     ChainNotify(context.Context) (<-chan []*HeadChange, error)
     ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+    ChainGetGenesis(context.Context) (*types.TipSet, error)
     GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
     MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
     MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
@@ -118,17 +118,21 @@ type StorageMiner interface {
     WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
 
     //storiface.WorkerReturn
     ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
     ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
     ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
     ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
     ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
     ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+    ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true
+    ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
+    ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
+    ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
     ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
     ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
     ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
     ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
     ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 
     // SealingSchedDiag dumps internal sealing scheduler state
     SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
@@ -145,6 +149,7 @@ type StorageMiner interface {
     StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
     StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
     StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
+    StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin
 
     StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
     StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
@@ -39,6 +39,10 @@ type Worker interface {
     SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
     SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
     FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+    ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
+    ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
+    ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
+    GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin
     ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
     MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
     UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
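Note: these four Worker additions form the sector-update (snap deals) pipeline. Each call is asynchronous: it returns a storiface.CallID, and the actual result is reported back through the matching Return* method added to the StorageMiner interface in the earlier hunk (ReturnReplicaUpdate, ReturnProveReplicaUpdate1/2, ReturnGenerateSectorKeyFromData). The sketch below only illustrates the call shapes from this diff; in lotus the sector-storage manager, not user code, drives this sequencing, and each step's inputs come from the previous step's Return* callback.

```go
package main

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	"github.com/filecoin-project/specs-storage/storage"
)

// startReplicaUpdate schedules encoding of new deal pieces into an existing
// replica. The new sealed/unsealed CIDs arrive later via ReturnReplicaUpdate.
func startReplicaUpdate(ctx context.Context, w api.Worker, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
	return w.ReplicaUpdate(ctx, sector, pieces)
}

// startUpdateProofs schedules the first proving step once the update CIDs are
// known; the vanilla proofs come back via ReturnProveReplicaUpdate1 and are
// then fed to ProveReplicaUpdate2 to produce the final ReplicaUpdateProof.
func startUpdateProofs(ctx context.Context, w api.Worker, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
	return w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)
}
```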
@@ -91,6 +91,8 @@ func init() {
 
     storeIDExample := imports.ID(50)
     textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
+    apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash")
+    clientEvent := retrievalmarket.ClientEventDealAccepted
 
     addExample(bitfield.NewFromSet([]uint64{5}))
     addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
@@ -122,9 +124,12 @@ func init() {
     addExample(datatransfer.Ongoing)
     addExample(storeIDExample)
     addExample(&storeIDExample)
+    addExample(clientEvent)
+    addExample(&clientEvent)
     addExample(retrievalmarket.ClientEventDealAccepted)
     addExample(retrievalmarket.DealStatusNew)
     addExample(&textSelExample)
+    addExample(&apiSelExample)
     addExample(network.ReachabilityPublic)
     addExample(build.NewestNetworkVersion)
     addExample(map[string]int{"name": 42})
@@ -226,16 +231,18 @@ func init() {
         Hostname: "host",
         Resources: storiface.WorkerResources{
             MemPhysical: 256 << 30,
+            MemUsed: 2 << 30,
             MemSwap: 120 << 30,
-            MemReserved: 2 << 30,
+            MemSwapUsed: 2 << 30,
             CPUs: 64,
             GPUs: []string{"aGPU 1337"},
+            Resources: storiface.ResourceTable,
         },
     },
     Enabled: true,
     MemUsedMin: 0,
     MemUsedMax: 0,
-    GpuUsed: false,
+    GpuUsed: 0,
     CpuUse: 0,
 },
 })
@@ -247,6 +254,13 @@ func init() {
         api.SectorState(sealing.Proving): 120,
     })
     addExample([]abi.SectorNumber{123, 124})
+    addExample([]storiface.SectorLock{
+        {
+            Sector: abi.SectorID{Number: 123, Miner: 1000},
+            Write: [storiface.FileTypes]uint{0, 0, 1},
+            Read: [storiface.FileTypes]uint{2, 3, 0},
+        },
+    })
 
     // worker specific
     addExample(storiface.AcquireMove)
@@ -281,6 +295,7 @@ func init() {
         State: "ShardStateAvailable",
         Error: "<error>",
     })
+    addExample(storiface.ResourceTable)
 }
 
 func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
@@ -25,7 +25,6 @@ import (
     miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
     types "github.com/filecoin-project/lotus/chain/types"
     alerting "github.com/filecoin-project/lotus/journal/alerting"
-    marketevents "github.com/filecoin-project/lotus/markets/loggers"
     dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
     imports "github.com/filecoin-project/lotus/node/repo/imports"
     miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
@@ -537,6 +536,20 @@ func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomo
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
 }
 
+// ClientExport mocks base method.
+func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2)
+    ret0, _ := ret[0].(error)
+    return ret0
+}
+
+// ClientExport indicates an expected call of ClientExport.
+func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2)
+}
+
 // ClientFindData mocks base method.
 func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
     m.ctrl.T.Helper()
@@ -775,17 +788,18 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2,
 }
 
 // ClientRetrieve mocks base method.
-func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
+func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
-    ret0, _ := ret[0].(error)
-    return ret0
+    ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1)
+    ret0, _ := ret[0].(*api.RestrievalRes)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
 }
 
 // ClientRetrieve indicates an expected call of ClientRetrieve.
-func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1)
 }
 
 // ClientRetrieveTryRestartInsufficientFunds mocks base method.
@@ -802,19 +816,18 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
 }
 
-// ClientRetrieveWithEvents mocks base method.
-func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+// ClientRetrieveWait mocks base method.
+func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
-    ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
-    ret1, _ := ret[1].(error)
-    return ret0, ret1
+    ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1)
+    ret0, _ := ret[0].(error)
+    return ret0
 }
 
-// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
-func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
+// ClientRetrieveWait indicates an expected call of ClientRetrieveWait.
+func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1)
 }
 
 // ClientStartDeal mocks base method.
@@ -1428,18 +1441,33 @@ func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, a
 }
 
 // MsigCancel mocks base method.
-func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) {
+func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+    ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3)
     ret0, _ := ret[0].(*api.MessagePrototype)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }
 
 // MsigCancel indicates an expected call of MsigCancel.
-func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
+func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3)
+}
+
+// MsigCancelTxnHash mocks base method.
+func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+    ret0, _ := ret[0].(*api.MessagePrototype)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
+}
+
+// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash.
+func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
 }
 
 // MsigCreate mocks base method.
api/proxy_gen.go (217 changed lines)
@@ -28,7 +28,6 @@ import (
     "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
     "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
     "github.com/filecoin-project/lotus/journal/alerting"
-    marketevents "github.com/filecoin-project/lotus/markets/loggers"
     "github.com/filecoin-project/lotus/node/modules/dtypes"
     "github.com/filecoin-project/lotus/node/repo/imports"
     "github.com/filecoin-project/specs-storage/storage"
@@ -162,6 +161,8 @@ type FullNodeStruct struct {
 
     ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"`
 
+    ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"`
+
     ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"`
 
     ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"`
@@ -194,11 +195,11 @@ type FullNodeStruct struct {
 
     ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
 
-    ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"`
+    ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"`
 
     ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
 
-    ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+    ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"`
 
     ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"`
 
@@ -270,7 +271,9 @@ type FullNodeStruct struct {
 
     MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"`
 
-    MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
+    MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"`
+
+    MsigCancelTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
 
     MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"`
 
@@ -478,8 +481,14 @@ type GatewayStruct struct {
     Internal struct {
         ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
 
+        ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) ``
+
         ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
 
+        ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) ``
+
+        ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) ``
+
         ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) ``
 
         ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
@@ -707,12 +716,20 @@ type StorageMinerStruct struct {
 
     ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+    ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
     ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+    ReturnProveReplicaUpdate1 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error `perm:"admin"`
+
+    ReturnProveReplicaUpdate2 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error `perm:"admin"`
+
     ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"`
 
     ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+    ReturnReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error `perm:"admin"`
+
     ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"`
 
     ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"`
@@ -785,6 +802,8 @@ type StorageMinerStruct struct {
 
     StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
 
+    StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"`
+
     StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"`
 
     StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
@@ -844,6 +863,8 @@ type WorkerStruct struct {
 
     FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
 
+    GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
+
     Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
 
     MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
@@ -852,10 +873,16 @@ type WorkerStruct struct {
 
     ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
 
+    ProveReplicaUpdate1 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) `perm:"admin"`
+
+    ProveReplicaUpdate2 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) `perm:"admin"`
+
     ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
 
     Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"`
 
+    ReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+
     SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
 
     SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
@ -1349,6 +1376,17 @@ func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize,
    return *new(DataSize), ErrNotSupported
}

+ func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
+     if s.Internal.ClientExport == nil {
+         return ErrNotSupported
+     }
+     return s.Internal.ClientExport(p0, p1, p2)
+ }
+
+ func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
+     return ErrNotSupported
+ }
+
func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
    if s.Internal.ClientFindData == nil {
        return *new([]QueryOffer), ErrNotSupported
@ -1525,15 +1563,15 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran
    return ErrNotSupported
}

- func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
+ func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
    if s.Internal.ClientRetrieve == nil {
-       return ErrNotSupported
+       return nil, ErrNotSupported
    }
-   return s.Internal.ClientRetrieve(p0, p1, p2)
+   return s.Internal.ClientRetrieve(p0, p1)
}

- func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
+ func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
-   return ErrNotSupported
+   return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
@ -1547,15 +1585,15 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont
    return ErrNotSupported
}

- func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
-   if s.Internal.ClientRetrieveWithEvents == nil {
+   if s.Internal.ClientRetrieveWait == nil {
-       return nil, ErrNotSupported
+       return ErrNotSupported
    }
-   return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
+   return s.Internal.ClientRetrieveWait(p0, p1)
}

- func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
-   return nil, ErrNotSupported
+   return ErrNotSupported
}

func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
@ -1943,14 +1981,25 @@ func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address
    return nil, ErrNotSupported
}

- func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
+ func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
    if s.Internal.MsigCancel == nil {
        return nil, ErrNotSupported
    }
-   return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7)
+   return s.Internal.MsigCancel(p0, p1, p2, p3)
}

- func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
+ func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
+     return nil, ErrNotSupported
+ }
+
+ func (s *FullNodeStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
+     if s.Internal.MsigCancelTxnHash == nil {
+         return nil, ErrNotSupported
+     }
+     return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7)
+ }
+
+ func (s *FullNodeStub) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
    return nil, ErrNotSupported
}
@ -3032,6 +3081,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl
    return nil, ErrNotSupported
}

+ func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+     if s.Internal.ChainGetGenesis == nil {
+         return nil, ErrNotSupported
+     }
+     return s.Internal.ChainGetGenesis(p0)
+ }
+
+ func (s *GatewayStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+     return nil, ErrNotSupported
+ }
+
func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
    if s.Internal.ChainGetMessage == nil {
        return nil, ErrNotSupported
@ -3043,6 +3103,28 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me
    return nil, ErrNotSupported
}

+ func (s *GatewayStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
+     if s.Internal.ChainGetParentMessages == nil {
+         return *new([]Message), ErrNotSupported
+     }
+     return s.Internal.ChainGetParentMessages(p0, p1)
+ }
+
+ func (s *GatewayStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
+     return *new([]Message), ErrNotSupported
+ }
+
+ func (s *GatewayStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+     if s.Internal.ChainGetParentReceipts == nil {
+         return *new([]*types.MessageReceipt), ErrNotSupported
+     }
+     return s.Internal.ChainGetParentReceipts(p0, p1)
+ }
+
+ func (s *GatewayStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+     return *new([]*types.MessageReceipt), ErrNotSupported
+ }
+
func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
    if s.Internal.ChainGetPath == nil {
        return *new([]*HeadChange), ErrNotSupported
@ -4154,6 +4236,17 @@ func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface
    return ErrNotSupported
}

+ func (s *StorageMinerStruct) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+     if s.Internal.ReturnGenerateSectorKeyFromData == nil {
+         return ErrNotSupported
+     }
+     return s.Internal.ReturnGenerateSectorKeyFromData(p0, p1, p2)
+ }
+
+ func (s *StorageMinerStub) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+     return ErrNotSupported
+ }
+
func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
    if s.Internal.ReturnMoveStorage == nil {
        return ErrNotSupported
@ -4165,6 +4258,28 @@ func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.Ca
    return ErrNotSupported
}

+ func (s *StorageMinerStruct) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error {
+     if s.Internal.ReturnProveReplicaUpdate1 == nil {
+         return ErrNotSupported
+     }
+     return s.Internal.ReturnProveReplicaUpdate1(p0, p1, p2, p3)
+ }
+
+ func (s *StorageMinerStub) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error {
+     return ErrNotSupported
+ }
+
+ func (s *StorageMinerStruct) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error {
+     if s.Internal.ReturnProveReplicaUpdate2 == nil {
+         return ErrNotSupported
+     }
+     return s.Internal.ReturnProveReplicaUpdate2(p0, p1, p2, p3)
+ }
+
+ func (s *StorageMinerStub) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error {
+     return ErrNotSupported
+ }
+
func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error {
    if s.Internal.ReturnReadPiece == nil {
        return ErrNotSupported
@ -4187,6 +4302,17 @@ func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storifac
    return ErrNotSupported
}

+ func (s *StorageMinerStruct) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error {
+     if s.Internal.ReturnReplicaUpdate == nil {
+         return ErrNotSupported
+     }
+     return s.Internal.ReturnReplicaUpdate(p0, p1, p2, p3)
+ }
+
+ func (s *StorageMinerStub) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error {
+     return ErrNotSupported
+ }
+
func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error {
    if s.Internal.ReturnSealCommit1 == nil {
        return ErrNotSupported
@ -4583,6 +4709,17 @@ func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID
    return *new([]stores.SectorStorageInfo), ErrNotSupported
}

+ func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) {
+     if s.Internal.StorageGetLocks == nil {
+         return *new(storiface.SectorLocks), ErrNotSupported
+     }
+     return s.Internal.StorageGetLocks(p0)
+ }
+
+ func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) {
+     return *new(storiface.SectorLocks), ErrNotSupported
+ }
+
func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
    if s.Internal.StorageInfo == nil {
        return *new(stores.StorageInfo), ErrNotSupported
@ -4814,6 +4951,17 @@ func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2
    return *new(storiface.CallID), ErrNotSupported
}

+ func (s *WorkerStruct) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) {
+     if s.Internal.GenerateSectorKeyFromData == nil {
+         return *new(storiface.CallID), ErrNotSupported
+     }
+     return s.Internal.GenerateSectorKeyFromData(p0, p1, p2)
+ }
+
+ func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) {
+     return *new(storiface.CallID), ErrNotSupported
+ }
+
func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) {
    if s.Internal.Info == nil {
        return *new(storiface.WorkerInfo), ErrNotSupported
@ -4858,6 +5006,28 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) {
    return *new(uuid.UUID), ErrNotSupported
}

+ func (s *WorkerStruct) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) {
+     if s.Internal.ProveReplicaUpdate1 == nil {
+         return *new(storiface.CallID), ErrNotSupported
+     }
+     return s.Internal.ProveReplicaUpdate1(p0, p1, p2, p3, p4)
+ }
+
+ func (s *WorkerStub) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) {
+     return *new(storiface.CallID), ErrNotSupported
+ }
+
+ func (s *WorkerStruct) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) {
+     if s.Internal.ProveReplicaUpdate2 == nil {
+         return *new(storiface.CallID), ErrNotSupported
+     }
+     return s.Internal.ProveReplicaUpdate2(p0, p1, p2, p3, p4, p5)
+ }
+
+ func (s *WorkerStub) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) {
+     return *new(storiface.CallID), ErrNotSupported
+ }
+
func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
    if s.Internal.ReleaseUnsealed == nil {
        return *new(storiface.CallID), ErrNotSupported
@ -4880,6 +5050,17 @@ func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error {
    return ErrNotSupported
}

+ func (s *WorkerStruct) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) {
+     if s.Internal.ReplicaUpdate == nil {
+         return *new(storiface.CallID), ErrNotSupported
+     }
+     return s.Internal.ReplicaUpdate(p0, p1, p2)
+ }
+
+ func (s *WorkerStub) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) {
+     return *new(storiface.CallID), ErrNotSupported
+ }
+
func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) {
    if s.Internal.SealCommit1 == nil {
        return *new(storiface.CallID), ErrNotSupported
48
api/types.go
@ -5,11 +5,10 @@ import (
    "fmt"
    "time"

-   "github.com/filecoin-project/go-fil-markets/retrievalmarket"
-   "github.com/filecoin-project/lotus/chain/types"

    datatransfer "github.com/filecoin-project/go-data-transfer"
+   "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-state-types/abi"
+   "github.com/filecoin-project/lotus/chain/types"
    "github.com/ipfs/go-cid"

    "github.com/libp2p/go-libp2p-core/peer"
@ -194,4 +193,47 @@ type RetrievalInfo struct {
    TransferChannelID *datatransfer.ChannelID
    DataTransfer      *DataTransferChannel
+
+   // optional event if part of ClientGetRetrievalUpdates
+   Event *retrievalmarket.ClientEvent
+ }
+
+ type RestrievalRes struct {
+     DealID retrievalmarket.DealID
+ }
+
+ // Selector specifies ipld selector string
+ // - if the string starts with '{', it's interpreted as json selector string
+ //   see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/
+ // - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path)
+ //   see https://github.com/ipld/go-ipld-selector-text-lite
+ type Selector string
+
+ type DagSpec struct {
+     // DataSelector matches data to be retrieved
+     // - when using textselector, the path specifies subtree
+     // - the matched graph must have a single root
+     DataSelector *Selector
+
+     // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector
+     // When true, in addition to the selection target, the resulting CAR will contain every block along the
+     // path back to, and including the original root
+     // When false the resulting CAR contains only the blocks of the target subdag
+     ExportMerkleProof bool
+ }
+
+ type ExportRef struct {
+     Root cid.Cid
+
+     // DAGs array specifies a list of DAGs to export
+     // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node
+     // - If exporting into a car file
+     //   - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root
+     //   - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car
+     // - When not specified defaults to a single DAG:
+     //   - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`
+     DAGs []DagSpec
+
+     FromLocalCAR string // if specified, get data from a local CARv2 file.
+     DealID       retrievalmarket.DealID
}
@ -12,6 +12,7 @@ import (
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/go-state-types/dline"
    "github.com/ipfs/go-cid"
+   textselector "github.com/ipld/go-ipld-selector-text-lite"
    "github.com/libp2p/go-libp2p-core/peer"

    "github.com/filecoin-project/lotus/api"
@ -325,10 +326,10 @@ type FullNode interface {
    // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
    ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
    // ClientRetrieve initiates the retrieval of a file, as specified in the order.
-   ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin
+   ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin
    // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
    // of status updates.
-   ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
+   ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
    // ClientQueryAsk returns a signed StorageAsk from the specified miner.
    // ClientListRetrievals returns information about retrievals made by the local client
    ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
@ -714,3 +715,37 @@ type FullNode interface {
    // the path specified when calling CreateBackup is within the base path
    CreateBackup(ctx context.Context, fpath string) error //perm:admin
}

+ func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder {
+     return RetrievalOrder{
+         Root:                    o.Root,
+         Piece:                   o.Piece,
+         Size:                    o.Size,
+         Total:                   o.MinPrice,
+         UnsealPrice:             o.UnsealPrice,
+         PaymentInterval:         o.PaymentInterval,
+         PaymentIntervalIncrease: o.PaymentIntervalIncrease,
+         Client:                  client,
+
+         Miner:     o.Miner,
+         MinerPeer: &o.MinerPeer,
+     }
+ }
+
+ type RetrievalOrder struct {
+     // TODO: make this less unixfs specific
+     Root                  cid.Cid
+     Piece                 *cid.Cid
+     DatamodelPathSelector *textselector.Expression
+     Size                  uint64
+
+     FromLocalCAR string // if specified, get data from a local CARv2 file.
+     // TODO: support offset
+     Total                   types.BigInt
+     UnsealPrice             types.BigInt
+     PaymentInterval         uint64
+     PaymentIntervalIncrease uint64
+     Client                  address.Address
+     Miner                   address.Address
+     MinerPeer               *retrievalmarket.RetrievalPeer
+ }
@ -125,11 +125,11 @@ type FullNodeStruct struct {

    ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

-   ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
+   ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`

    ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`

-   ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+   ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`

    ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`

@ -965,14 +965,14 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran
    return ErrNotSupported
}

- func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
+ func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
    if s.Internal.ClientRetrieve == nil {
        return ErrNotSupported
    }
    return s.Internal.ClientRetrieve(p0, p1, p2)
}

- func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
+ func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
    return ErrNotSupported
}

@ -987,14 +987,14 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont
    return ErrNotSupported
}

- func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
    if s.Internal.ClientRetrieveWithEvents == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
}

- func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
    return nil, ErrNotSupported
}
@ -21,6 +21,7 @@ import (
    network "github.com/filecoin-project/go-state-types/network"
    api "github.com/filecoin-project/lotus/api"
    apitypes "github.com/filecoin-project/lotus/api/types"
+   v0api "github.com/filecoin-project/lotus/api/v0api"
    miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    types "github.com/filecoin-project/lotus/chain/types"
    alerting "github.com/filecoin-project/lotus/journal/alerting"
@ -760,7 +761,7 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2,
}

// ClientRetrieve mocks base method.
- func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
+ func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
    ret0, _ := ret[0].(error)
@ -788,7 +789,7 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar
}

// ClientRetrieveWithEvents mocks base method.
- func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
    ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
@ -3,7 +3,10 @@ package v0api

import (
    "context"

+   "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+   "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/crypto"
+   marketevents "github.com/filecoin-project/lotus/markets/loggers"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/lotus/chain/types"
@ -108,7 +111,7 @@ func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Add
}

func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
-   p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
+   p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params)
    if err != nil {
        return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
    }
@ -194,4 +197,144 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty
    return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
}

+ func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error {
+     events := make(chan marketevents.RetrievalEvent)
+     go w.clientRetrieve(ctx, order, ref, events)
+
+     for {
+         select {
+         case evt, ok := <-events:
+             if !ok { // done successfully
+                 return nil
+             }
+
+             if evt.Err != "" {
+                 return xerrors.Errorf("retrieval failed: %s", evt.Err)
+             }
+         case <-ctx.Done():
+             return xerrors.Errorf("retrieval timed out")
+         }
+     }
+ }
+
+ func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+     events := make(chan marketevents.RetrievalEvent)
+     go w.clientRetrieve(ctx, order, ref, events)
+     return events, nil
+ }
+
+ func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error {
+     for {
+         var subscribeEvent api.RetrievalInfo
+         var evt retrievalmarket.ClientEvent
+         select {
+         case <-ctx.Done():
+             return xerrors.New("Retrieval Timed Out")
+         case subscribeEvent = <-subscribeEvents:
+             if subscribeEvent.ID != dealID {
+                 // we can't check the deal ID ahead of time because:
+                 // 1. We need to subscribe before retrieving.
+                 // 2. We won't know the deal ID until after retrieving.
+                 continue
+             }
+             if subscribeEvent.Event != nil {
+                 evt = *subscribeEvent.Event
+             }
+         }
+
+         select {
+         case <-ctx.Done():
+             return xerrors.New("Retrieval Timed Out")
+         case events <- marketevents.RetrievalEvent{
+             Event:         evt,
+             Status:        subscribeEvent.Status,
+             BytesReceived: subscribeEvent.BytesReceived,
+             FundsSpent:    subscribeEvent.TotalPaid,
+         }:
+         }
+
+         switch subscribeEvent.Status {
+         case retrievalmarket.DealStatusCompleted:
+             return nil
+         case retrievalmarket.DealStatusRejected:
+             return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message)
+         case
+             retrievalmarket.DealStatusDealNotFound,
+             retrievalmarket.DealStatusErrored:
+             return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message)
+         }
+     }
+ }
+
+ func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
+     defer close(events)
+
+     finish := func(e error) {
+         if e != nil {
+             events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()}
+         }
+     }
+
+     var dealID retrievalmarket.DealID
+     if order.FromLocalCAR == "" {
+         // Subscribe to events before retrieving to avoid losing events.
+         subscribeCtx, cancel := context.WithCancel(ctx)
+         defer cancel()
+         retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx)
+
+         if err != nil {
+             finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err))
+             return
+         }
+
+         retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{
+             Root:                    order.Root,
+             Piece:                   order.Piece,
+             Size:                    order.Size,
+             Total:                   order.Total,
+             UnsealPrice:             order.UnsealPrice,
+             PaymentInterval:         order.PaymentInterval,
+             PaymentIntervalIncrease: order.PaymentIntervalIncrease,
+             Client:                  order.Client,
+             Miner:                   order.Miner,
+             MinerPeer:               order.MinerPeer,
+         })
+
+         if err != nil {
+             finish(xerrors.Errorf("Retrieve failed: %w", err))
+             return
+         }
+
+         dealID = retrievalRes.DealID
+
+         err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events)
+         if err != nil {
+             finish(xerrors.Errorf("Retrieve: %w", err))
+             return
+         }
+     }
+
+     // If ref is nil, it only fetches the data into the configured blockstore.
+     if ref == nil {
+         finish(nil)
+         return
+     }
+
+     eref := api.ExportRef{
+         Root:         order.Root,
+         FromLocalCAR: order.FromLocalCAR,
+         DealID:       dealID,
+     }
+
+     if order.DatamodelPathSelector != nil {
+         s := api.Selector(*order.DatamodelPathSelector)
+         eref.DAGs = append(eref.DAGs, api.DagSpec{
+             DataSelector:      &s,
+             ExportMerkleProof: true,
+         })
+     }
+
+     finish(w.ClientExport(ctx, eref, *ref))
+ }
+
var _ FullNode = &WrapperV1Full{}
@ -58,7 +58,7 @@ var (
    FullAPIVersion1 = newVer(2, 1, 0)

    MinerAPIVersion0  = newVer(1, 2, 0)
-   WorkerAPIVersion0 = newVer(1, 1, 0)
+   WorkerAPIVersion0 = newVer(1, 5, 0)
)

//nolint:varcheck,deadcode
@ -25,35 +25,35 @@ func NewAPIBlockstore(cio ChainIO) Blockstore {
    return Adapt(bs) // return an adapted blockstore.
}

- func (a *apiBlockstore) DeleteBlock(cid.Cid) error {
+ func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error {
    return xerrors.New("not supported")
}

- func (a *apiBlockstore) Has(c cid.Cid) (bool, error) {
+ func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
-   return a.api.ChainHasObj(context.TODO(), c)
+   return a.api.ChainHasObj(ctx, c)
}

- func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) {
+ func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
-   bb, err := a.api.ChainReadObj(context.TODO(), c)
+   bb, err := a.api.ChainReadObj(ctx, c)
    if err != nil {
        return nil, err
    }
    return blocks.NewBlockWithCid(bb, c)
}

- func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) {
+ func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
-   bb, err := a.api.ChainReadObj(context.TODO(), c)
+   bb, err := a.api.ChainReadObj(ctx, c)
    if err != nil {
        return 0, err
    }
    return len(bb), nil
}

- func (a *apiBlockstore) Put(blocks.Block) error {
+ func (a *apiBlockstore) Put(context.Context, blocks.Block) error {
    return xerrors.New("not supported")
}

- func (a *apiBlockstore) PutMany([]blocks.Block) error {
+ func (a *apiBlockstore) PutMany(context.Context, []blocks.Block) error {
    return xerrors.New("not supported")
}
@ -525,7 +525,7 @@ func (b *Blockstore) Size() (int64, error) {

// View implements blockstore.Viewer, which leverages zero-copy read-only
// access to values.
- func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
+ func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) error) error {
    if err := b.access(); err != nil {
        return err
    }
@ -552,7 +552,7 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
}

// Has implements Blockstore.Has.
- func (b *Blockstore) Has(cid cid.Cid) (bool, error) {
+ func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
    if err := b.access(); err != nil {
        return false, err
    }
@ -582,7 +582,7 @@ func (b *Blockstore) Has(cid cid.Cid) (bool, error) {
}

// Get implements Blockstore.Get.
- func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
+ func (b *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
    if !cid.Defined() {
        return nil, blockstore.ErrNotFound
    }
@ -619,7 +619,7 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
}

// GetSize implements Blockstore.GetSize.
- func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
+ func (b *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
    if err := b.access(); err != nil {
        return 0, err
    }
@ -652,7 +652,7 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
}

// Put implements Blockstore.Put.
- func (b *Blockstore) Put(block blocks.Block) error {
+ func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error {
    if err := b.access(); err != nil {
        return err
    }
@ -691,7 +691,7 @@ func (b *Blockstore) Put(block blocks.Block) error {
}

// PutMany implements Blockstore.PutMany.
- func (b *Blockstore) PutMany(blocks []blocks.Block) error {
+ func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
    if err := b.access(); err != nil {
        return err
    }
@ -755,7 +755,7 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error {
}

// DeleteBlock implements Blockstore.DeleteBlock.
- func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
+ func (b *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
    if err := b.access(); err != nil {
        return err
    }
@ -774,7 +774,7 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
    })
}

- func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
+ func (b *Blockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
    if err := b.access(); err != nil {
        return err
    }
@ -2,6 +2,7 @@ package badgerbs

import (
    "bytes"
+   "context"
    "fmt"
    "io/ioutil"
    "os"
@ -98,6 +99,7 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB,
}

func testMove(t *testing.T, optsF func(string) Options) {
+   ctx := context.Background()
    basePath, err := ioutil.TempDir("", "")
    if err != nil {
        t.Fatal(err)
@ -122,7 +124,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
    // add some blocks
    for i := 0; i < 10; i++ {
        blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
-       err := db.Put(blk)
+       err := db.Put(ctx, blk)
        if err != nil {
            t.Fatal(err)
        }
@ -132,7 +134,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
    // delete some of them
    for i := 5; i < 10; i++ {
        c := have[i].Cid()
-       err := db.DeleteBlock(c)
+       err := db.DeleteBlock(ctx, c)
        if err != nil {
            t.Fatal(err)
        }
@ -145,7 +147,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
    g.Go(func() error {
        for i := 10; i < 1000; i++ {
            blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
-           err := db.Put(blk)
+           err := db.Put(ctx, blk)
            if err != nil {
                return err
            }
@ -165,7 +167,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
    // now check that we have all the blocks in have and none in the deleted lists
    checkBlocks := func() {
        for _, blk := range have {
-           has, err := db.Has(blk.Cid())
+           has, err := db.Has(ctx, blk.Cid())
            if err != nil {
                t.Fatal(err)
            }
@ -174,7 +176,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
                t.Fatal("missing block")
            }

-           blk2, err := db.Get(blk.Cid())
+           blk2, err := db.Get(ctx, blk.Cid())
            if err != nil {
                t.Fatal(err)
            }
@ -185,7 +187,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
        }

        for _, c := range deleted {
-           has, err := db.Has(c)
+           has, err := db.Has(ctx, c)
            if err != nil {
                t.Fatal(err)
            }
@ -44,28 +44,31 @@ func (s *Suite) RunTests(t *testing.T, prefix string) {
}

func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
    }

    c := cid.NewCidV0(u.Hash([]byte("stuff")))
-   bl, err := bs.Get(c)
+   bl, err := bs.Get(ctx, c)
    require.Nil(t, bl)
    require.Equal(t, blockstore.ErrNotFound, err)
}

func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
    }

-   _, err := bs.Get(cid.Undef)
+   _, err := bs.Get(ctx, cid.Undef)
    require.Equal(t, blockstore.ErrNotFound, err)
}

func (s *Suite) TestPutThenGetBlock(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -73,15 +76,16 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) {

    orig := blocks.NewBlock([]byte("some data"))

-   err := bs.Put(orig)
+   err := bs.Put(ctx, orig)
    require.NoError(t, err)

-   fetched, err := bs.Get(orig.Cid())
+   fetched, err := bs.Get(ctx, orig.Cid())
    require.NoError(t, err)
    require.Equal(t, orig.RawData(), fetched.RawData())
}

func (s *Suite) TestHas(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -89,19 +93,20 @@ func (s *Suite) TestHas(t *testing.T) {

    orig := blocks.NewBlock([]byte("some data"))

-   err := bs.Put(orig)
+   err := bs.Put(ctx, orig)
    require.NoError(t, err)

-   ok, err := bs.Has(orig.Cid())
+   ok, err := bs.Has(ctx, orig.Cid())
    require.NoError(t, err)
    require.True(t, ok)

-   ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid())
+   ok, err = bs.Has(ctx, blocks.NewBlock([]byte("another thing")).Cid())
    require.NoError(t, err)
    require.False(t, ok)
}

func (s *Suite) TestCidv0v1(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -109,15 +114,17 @@ func (s *Suite) TestCidv0v1(t *testing.T) {

    orig := blocks.NewBlock([]byte("some data"))

-   err := bs.Put(orig)
+   err := bs.Put(ctx, orig)
    require.NoError(t, err)

-   fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash()))
+   fetched, err := bs.Get(ctx, cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash()))
    require.NoError(t, err)
    require.Equal(t, orig.RawData(), fetched.RawData())
}

func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
+   ctx := context.Background()

    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -127,21 +134,21 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
    missingBlock := blocks.NewBlock([]byte("missingBlock"))
    emptyBlock := blocks.NewBlock([]byte{})

-   err := bs.Put(block)
+   err := bs.Put(ctx, block)
    require.NoError(t, err)

-   blockSize, err := bs.GetSize(block.Cid())
+   blockSize, err := bs.GetSize(ctx, block.Cid())
    require.NoError(t, err)
    require.Len(t, block.RawData(), blockSize)

-   err = bs.Put(emptyBlock)
+   err = bs.Put(ctx, emptyBlock)
    require.NoError(t, err)

-   emptySize, err := bs.GetSize(emptyBlock.Cid())
+   emptySize, err := bs.GetSize(ctx, emptyBlock.Cid())
    require.NoError(t, err)
    require.Zero(t, emptySize)

-   missingSize, err := bs.GetSize(missingBlock.Cid())
+   missingSize, err := bs.GetSize(ctx, missingBlock.Cid())
    require.Equal(t, blockstore.ErrNotFound, err)
    require.Equal(t, -1, missingSize)
}
@ -203,6 +210,7 @@ func (s *Suite) TestDoubleClose(t *testing.T) {
}

func (s *Suite) TestReopenPutGet(t *testing.T) {
+   ctx := context.Background()
    bs, path := s.NewBlockstore(t)
    c, ok := bs.(io.Closer)
    if !ok {
@ -210,7 +218,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) {
    }

    orig := blocks.NewBlock([]byte("some data"))
-   err := bs.Put(orig)
+   err := bs.Put(ctx, orig)
    require.NoError(t, err)

    err = c.Close()
@ -219,7 +227,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) {
    bs, err = s.OpenBlockstore(t, path)
    require.NoError(t, err)

-   fetched, err := bs.Get(orig.Cid())
+   fetched, err := bs.Get(ctx, orig.Cid())
    require.NoError(t, err)
    require.Equal(t, orig.RawData(), fetched.RawData())

@ -228,6 +236,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) {
}

func (s *Suite) TestPutMany(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -238,15 +247,15 @@ func (s *Suite) TestPutMany(t *testing.T) {
        blocks.NewBlock([]byte("foo2")),
        blocks.NewBlock([]byte("foo3")),
    }
-   err := bs.PutMany(blks)
+   err := bs.PutMany(ctx, blks)
    require.NoError(t, err)

    for _, blk := range blks {
-       fetched, err := bs.Get(blk.Cid())
+       fetched, err := bs.Get(ctx, blk.Cid())
        require.NoError(t, err)
        require.Equal(t, blk.RawData(), fetched.RawData())

-       ok, err := bs.Has(blk.Cid())
+       ok, err := bs.Has(ctx, blk.Cid())
        require.NoError(t, err)
        require.True(t, ok)
    }
@ -259,6 +268,7 @@ func (s *Suite) TestPutMany(t *testing.T) {
}

func (s *Suite) TestDelete(t *testing.T) {
+   ctx := context.Background()
    bs, _ := s.NewBlockstore(t)
    if c, ok := bs.(io.Closer); ok {
        defer func() { require.NoError(t, c.Close()) }()
@ -269,10 +279,10 @@ func (s *Suite) TestDelete(t *testing.T) {
        blocks.NewBlock([]byte("foo2")),
        blocks.NewBlock([]byte("foo3")),
    }
-   err := bs.PutMany(blks)
+   err := bs.PutMany(ctx, blks)
    require.NoError(t, err)
require.NoError(t, err)
|
||||||
|
|
||||||
err = bs.DeleteBlock(blks[1].Cid())
|
err = bs.DeleteBlock(ctx, blks[1].Cid())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
ch, err := bs.AllKeysChan(context.Background())
|
ch, err := bs.AllKeysChan(context.Background())
|
||||||
@ -285,17 +295,17 @@ func (s *Suite) TestDelete(t *testing.T) {
|
|||||||
cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()),
|
cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()),
|
||||||
})
|
})
|
||||||
|
|
||||||
has, err := bs.Has(blks[1].Cid())
|
has, err := bs.Has(ctx, blks[1].Cid())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.False(t, has)
|
require.False(t, has)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid {
|
func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid {
|
||||||
|
ctx := context.Background()
|
||||||
keys := make([]cid.Cid, count)
|
keys := make([]cid.Cid, count)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
|
block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
|
||||||
err := bs.Put(block)
|
err := bs.Put(ctx, block)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what
|
// NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what
|
||||||
// the store returns.
|
// the store returns.
|
||||||
|
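The test-suite hunks above all apply the same mechanical change: each test hoists a single `ctx := context.Background()` and threads it through every blockstore call. A minimal standalone sketch of that calling convention, assuming a context-aware interface along the lines this diff migrates to (the `Blockstore` interface here is a paraphrase, not the exact upstream definition):

```go
package example

import (
	"context"
	"testing"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"
)

// Blockstore is a paraphrase of the context-aware method set this diff migrates to.
type Blockstore interface {
	Put(ctx context.Context, blk blocks.Block) error
	Has(ctx context.Context, c cid.Cid) (bool, error)
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
}

// roundTrip shows the post-migration calling convention: one context per test,
// passed explicitly to every store call instead of being implied by the store.
func roundTrip(t *testing.T, bs Blockstore) {
	ctx := context.Background()

	orig := blocks.NewBlock([]byte("some data"))
	require.NoError(t, bs.Put(ctx, orig))

	ok, err := bs.Has(ctx, orig.Cid())
	require.NoError(t, err)
	require.True(t, ok)

	fetched, err := bs.Get(ctx, orig.Cid())
	require.NoError(t, err)
	require.Equal(t, orig.RawData(), fetched.RawData())
}
```

The helper is deliberately store-agnostic, so the same round-trip can be exercised against any implementation in the suite.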
@ -1,6 +1,8 @@
|
|||||||
package blockstore
|
package blockstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
cid "github.com/ipfs/go-cid"
|
cid "github.com/ipfs/go-cid"
|
||||||
ds "github.com/ipfs/go-datastore"
|
ds "github.com/ipfs/go-datastore"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
@ -27,7 +29,7 @@ type BasicBlockstore = blockstore.Blockstore
|
|||||||
type Viewer = blockstore.Viewer
|
type Viewer = blockstore.Viewer
|
||||||
|
|
||||||
type BatchDeleter interface {
|
type BatchDeleter interface {
|
||||||
DeleteMany(cids []cid.Cid) error
|
DeleteMany(ctx context.Context, cids []cid.Cid) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockstoreIterator is a trait for efficient iteration
|
// BlockstoreIterator is a trait for efficient iteration
|
||||||
@ -93,17 +95,17 @@ type adaptedBlockstore struct {
|
|||||||
|
|
||||||
var _ Blockstore = (*adaptedBlockstore)(nil)
|
var _ Blockstore = (*adaptedBlockstore)(nil)
|
||||||
|
|
||||||
func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error {
|
func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
|
||||||
blk, err := a.Get(cid)
|
blk, err := a.Get(ctx, cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return callback(blk.RawData())
|
return callback(blk.RawData())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error {
|
func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||||
for _, cid := range cids {
|
for _, cid := range cids {
|
||||||
err := a.DeleteBlock(cid)
|
err := a.DeleteBlock(ctx, cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
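The adapter above only has to forward the new context argument to the wrapped store. A small standalone sketch of that delegation pattern (the interface and type names are illustrative, not the Lotus definitions):

```go
package example

import (
	"context"

	cid "github.com/ipfs/go-cid"
)

// blockDeleter is an illustrative stand-in for the single-block delete method
// the wrapped blockstore already provides.
type blockDeleter interface {
	DeleteBlock(ctx context.Context, c cid.Cid) error
}

// adapted wraps a blockDeleter and adds a batched DeleteMany, forwarding the
// caller's context to every underlying call — the same shape as the adapter in
// this diff.
type adapted struct {
	blockDeleter
}

func (a *adapted) DeleteMany(ctx context.Context, cids []cid.Cid) error {
	for _, c := range cids {
		if err := a.DeleteBlock(ctx, c); err != nil {
			return err
		}
	}
	return nil
}
```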
@ -88,34 +88,34 @@ func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid,
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error {
|
func (bs *BufferedBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error {
|
||||||
if err := bs.read.DeleteBlock(c); err != nil {
|
if err := bs.read.DeleteBlock(ctx, c); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return bs.write.DeleteBlock(c)
|
return bs.write.DeleteBlock(ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error {
|
func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||||
if err := bs.read.DeleteMany(cids); err != nil {
|
if err := bs.read.DeleteMany(ctx, cids); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return bs.write.DeleteMany(cids)
|
return bs.write.DeleteMany(ctx, cids)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error {
|
func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
|
||||||
// both stores are viewable.
|
// both stores are viewable.
|
||||||
if err := bs.write.View(c, callback); err == ErrNotFound {
|
if err := bs.write.View(ctx, c, callback); err == ErrNotFound {
|
||||||
// not found in write blockstore; fall through.
|
// not found in write blockstore; fall through.
|
||||||
} else {
|
} else {
|
||||||
return err // propagate errors, or nil, i.e. found.
|
return err // propagate errors, or nil, i.e. found.
|
||||||
}
|
}
|
||||||
return bs.read.View(c, callback)
|
return bs.read.View(ctx, c, callback)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) {
|
func (bs *BufferedBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
|
||||||
if out, err := bs.write.Get(c); err != nil {
|
if out, err := bs.write.Get(ctx, c); err != nil {
|
||||||
if err != ErrNotFound {
|
if err != ErrNotFound {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -123,20 +123,20 @@ func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) {
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return bs.read.Get(c)
|
return bs.read.Get(ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) {
|
func (bs *BufferedBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||||
s, err := bs.read.GetSize(c)
|
s, err := bs.read.GetSize(ctx, c)
|
||||||
if err == ErrNotFound || s == 0 {
|
if err == ErrNotFound || s == 0 {
|
||||||
return bs.write.GetSize(c)
|
return bs.write.GetSize(ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
return s, err
|
return s, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) Put(blk block.Block) error {
|
func (bs *BufferedBlockstore) Put(ctx context.Context, blk block.Block) error {
|
||||||
has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check
|
has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -145,11 +145,11 @@ func (bs *BufferedBlockstore) Put(blk block.Block) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return bs.write.Put(blk)
|
return bs.write.Put(ctx, blk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) {
|
func (bs *BufferedBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
|
||||||
has, err := bs.write.Has(c)
|
has, err := bs.write.Has(ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -157,7 +157,7 @@ func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return bs.read.Has(c)
|
return bs.read.Has(ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) HashOnRead(hor bool) {
|
func (bs *BufferedBlockstore) HashOnRead(hor bool) {
|
||||||
@ -165,8 +165,8 @@ func (bs *BufferedBlockstore) HashOnRead(hor bool) {
|
|||||||
bs.write.HashOnRead(hor)
|
bs.write.HashOnRead(hor)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) PutMany(blks []block.Block) error {
|
func (bs *BufferedBlockstore) PutMany(ctx context.Context, blks []block.Block) error {
|
||||||
return bs.write.PutMany(blks)
|
return bs.write.PutMany(ctx, blks)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bs *BufferedBlockstore) Read() Blockstore {
|
func (bs *BufferedBlockstore) Read() Blockstore {
|
||||||
|
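The buffered store's read path is unchanged in shape — consult the write-side store first, fall back to the read-side store on a miss — with the caller's context now flowing through both lookups. A compact sketch of that fall-through, with a placeholder not-found sentinel standing in for the blockstore's `ErrNotFound`:

```go
package example

import (
	"context"
	"errors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// errNotFound is a placeholder for the blockstore's not-found sentinel error.
var errNotFound = errors.New("blockstore: block not found")

type getter interface {
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
}

// bufferedGet checks the write-side store and only consults the read-side
// store when the block is missing there, mirroring BufferedBlockstore.Get.
func bufferedGet(ctx context.Context, write, read getter, c cid.Cid) (blocks.Block, error) {
	blk, err := write.Get(ctx, c)
	switch {
	case err == nil:
		return blk, nil
	case errors.Is(err, errNotFound):
		return read.Get(ctx, c) // fall through to the read store
	default:
		return nil, err
	}
}
```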
@ -18,39 +18,39 @@ func NewDiscardStore(bs Blockstore) Blockstore {
|
|||||||
return &discardstore{bs: bs}
|
return &discardstore{bs: bs}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) Has(cid cid.Cid) (bool, error) {
|
func (b *discardstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
||||||
return b.bs.Has(cid)
|
return b.bs.Has(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) HashOnRead(hor bool) {
|
func (b *discardstore) HashOnRead(hor bool) {
|
||||||
b.bs.HashOnRead(hor)
|
b.bs.HashOnRead(hor)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) {
|
func (b *discardstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
|
||||||
return b.bs.Get(cid)
|
return b.bs.Get(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) GetSize(cid cid.Cid) (int, error) {
|
func (b *discardstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
||||||
return b.bs.GetSize(cid)
|
return b.bs.GetSize(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error {
|
func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error {
|
||||||
return b.bs.View(cid, f)
|
return b.bs.View(ctx, cid, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) Put(blk blocks.Block) error {
|
func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) PutMany(blks []blocks.Block) error {
|
func (b *discardstore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) DeleteBlock(cid cid.Cid) error {
|
func (b *discardstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *discardstore) DeleteMany(cids []cid.Cid) error {
|
func (b *discardstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
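The discard store keeps delegating reads but drops every write on the floor; only its method signatures change here. A standalone paraphrase of that shape (not the Lotus type itself), which is handy when replaying work without persisting its side effects:

```go
package example

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// reader is the minimal read-side surface the discard wrapper delegates to.
type reader interface {
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
}

// discard answers reads from the wrapped store but silently ignores writes.
type discard struct {
	inner reader
}

func (d *discard) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
	return d.inner.Get(ctx, c)
}

func (d *discard) Put(ctx context.Context, blk blocks.Block) error {
	return nil // intentionally a no-op
}
```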
@ -71,14 +71,14 @@ func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) {
|
|||||||
// chain bitswap puts blocks in temp blockstore which is cleaned up
|
// chain bitswap puts blocks in temp blockstore which is cleaned up
|
||||||
// every few min (to drop any messages we fetched but don't want)
|
// every few min (to drop any messages we fetched but don't want)
|
||||||
// in this case we want to keep this block around
|
// in this case we want to keep this block around
|
||||||
if err := fbs.Put(b); err != nil {
|
if err := fbs.Put(ctx, b); err != nil {
|
||||||
return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err)
|
return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err)
|
||||||
}
|
}
|
||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) {
|
func (fbs *FallbackStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
|
||||||
b, err := fbs.Blockstore.Get(c)
|
b, err := fbs.Blockstore.Get(ctx, c)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return b, nil
|
return b, nil
|
||||||
@ -89,8 +89,8 @@ func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) {
|
func (fbs *FallbackStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||||
sz, err := fbs.Blockstore.GetSize(c)
|
sz, err := fbs.Blockstore.GetSize(ctx, c)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return sz, nil
|
return sz, nil
|
||||||
|
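The fallback store's flow is also unchanged: a local miss triggers a fetch from the fallback source, and the fetched block is written back so it outlives the temporary bitswap store; the only difference is that the caller's context now accompanies both the fetch and the write-back. A hedged sketch under those assumptions (the `fetcher` type and error sentinel are illustrative):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

var errNotFound = errors.New("blockstore: block not found")

type store interface {
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
	Put(ctx context.Context, blk blocks.Block) error
}

// fetcher is an illustrative stand-in for the network-backed fallback source.
type fetcher func(ctx context.Context, c cid.Cid) (blocks.Block, error)

// getWithFallback mirrors the FallbackStore flow: serve from the local store,
// and on a miss fetch from the fallback and persist the result under the
// caller's context so the block survives the temporary store being cleaned up.
func getWithFallback(ctx context.Context, local store, fetch fetcher, c cid.Cid) (blocks.Block, error) {
	blk, err := local.Get(ctx, c)
	switch {
	case err == nil:
		return blk, nil
	case errors.Is(err, errNotFound):
		b, ferr := fetch(ctx, c)
		if ferr != nil {
			return nil, ferr
		}
		if perr := local.Put(ctx, b); perr != nil {
			return nil, fmt.Errorf("persisting fallback-fetched block: %w", perr)
		}
		return b, nil
	default:
		return nil, err
	}
}
```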
@ -38,7 +38,7 @@ func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) {
|
|||||||
return false, nil, err
|
return false, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) Has(cid cid.Cid) (bool, error) {
|
func (b *idstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
||||||
inline, _, err := decodeCid(cid)
|
inline, _, err := decodeCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, xerrors.Errorf("error decoding Cid: %w", err)
|
return false, xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -48,10 +48,10 @@ func (b *idstore) Has(cid cid.Cid) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.Has(cid)
|
return b.bs.Has(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) {
|
func (b *idstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
|
||||||
inline, data, err := decodeCid(cid)
|
inline, data, err := decodeCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("error decoding Cid: %w", err)
|
return nil, xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -61,10 +61,10 @@ func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) {
|
|||||||
return blocks.NewBlockWithCid(data, cid)
|
return blocks.NewBlockWithCid(data, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.Get(cid)
|
return b.bs.Get(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) GetSize(cid cid.Cid) (int, error) {
|
func (b *idstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
||||||
inline, data, err := decodeCid(cid)
|
inline, data, err := decodeCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, xerrors.Errorf("error decoding Cid: %w", err)
|
return 0, xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -74,10 +74,10 @@ func (b *idstore) GetSize(cid cid.Cid) (int, error) {
|
|||||||
return len(data), err
|
return len(data), err
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.GetSize(cid)
|
return b.bs.GetSize(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error {
|
func (b *idstore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error {
|
||||||
inline, data, err := decodeCid(cid)
|
inline, data, err := decodeCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error decoding Cid: %w", err)
|
return xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -87,10 +87,10 @@ func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error {
|
|||||||
return cb(data)
|
return cb(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.View(cid, cb)
|
return b.bs.View(ctx, cid, cb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) Put(blk blocks.Block) error {
|
func (b *idstore) Put(ctx context.Context, blk blocks.Block) error {
|
||||||
inline, _, err := decodeCid(blk.Cid())
|
inline, _, err := decodeCid(blk.Cid())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error decoding Cid: %w", err)
|
return xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -100,10 +100,10 @@ func (b *idstore) Put(blk blocks.Block) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.Put(blk)
|
return b.bs.Put(ctx, blk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) PutMany(blks []blocks.Block) error {
|
func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
||||||
toPut := make([]blocks.Block, 0, len(blks))
|
toPut := make([]blocks.Block, 0, len(blks))
|
||||||
for _, blk := range blks {
|
for _, blk := range blks {
|
||||||
inline, _, err := decodeCid(blk.Cid())
|
inline, _, err := decodeCid(blk.Cid())
|
||||||
@ -118,13 +118,13 @@ func (b *idstore) PutMany(blks []blocks.Block) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(toPut) > 0 {
|
if len(toPut) > 0 {
|
||||||
return b.bs.PutMany(toPut)
|
return b.bs.PutMany(ctx, toPut)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) DeleteBlock(cid cid.Cid) error {
|
func (b *idstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
|
||||||
inline, _, err := decodeCid(cid)
|
inline, _, err := decodeCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error decoding Cid: %w", err)
|
return xerrors.Errorf("error decoding Cid: %w", err)
|
||||||
@ -134,10 +134,10 @@ func (b *idstore) DeleteBlock(cid cid.Cid) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.bs.DeleteBlock(cid)
|
return b.bs.DeleteBlock(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *idstore) DeleteMany(cids []cid.Cid) error {
|
func (b *idstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||||
toDelete := make([]cid.Cid, 0, len(cids))
|
toDelete := make([]cid.Cid, 0, len(cids))
|
||||||
for _, cid := range cids {
|
for _, cid := range cids {
|
||||||
inline, _, err := decodeCid(cid)
|
inline, _, err := decodeCid(cid)
|
||||||
@ -152,7 +152,7 @@ func (b *idstore) DeleteMany(cids []cid.Cid) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(toDelete) > 0 {
|
if len(toDelete) > 0 {
|
||||||
return b.bs.DeleteMany(toDelete)
|
return b.bs.DeleteMany(ctx, toDelete)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
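The idstore short-circuits on "identity" CIDs — CIDs whose multihash is the identity function, so the block bytes are embedded in the CID itself — and only forwards everything else to the wrapped store. A sketch of just the decode step using go-multihash; the real Lotus helper may differ in details:

```go
package example

import (
	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// decodeInline reports whether c is an identity CID and, if so, returns the
// block bytes carried inside the CID. This is the check that lets the idstore
// answer Has/Get/GetSize without touching the underlying store at all.
func decodeInline(c cid.Cid) (inline bool, data []byte, err error) {
	dmh, err := mh.Decode(c.Hash())
	if err != nil {
		return false, nil, err
	}
	if dmh.Code == mh.IDENTITY {
		return true, dmh.Digest, nil
	}
	return false, nil, nil
}
```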
@ -79,12 +79,12 @@ func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onl
|
|||||||
return Adapt(bs), nil
|
return Adapt(bs), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error {
|
func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
|
||||||
return xerrors.Errorf("not supported")
|
return xerrors.Errorf("not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) {
|
func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
||||||
_, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid))
|
_, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// The underlying client is running in Offline mode.
|
// The underlying client is running in Offline mode.
|
||||||
// Stat() will fail with an err if the block isn't in the
|
// Stat() will fail with an err if the block isn't in the
|
||||||
@ -99,8 +99,8 @@ func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
|
func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
|
||||||
rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid))
|
rd, err := i.api.Block().Get(ctx, path.IpldPath(cid))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("getting ipfs block: %w", err)
|
return nil, xerrors.Errorf("getting ipfs block: %w", err)
|
||||||
}
|
}
|
||||||
@ -113,8 +113,8 @@ func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
|
|||||||
return blocks.NewBlockWithCid(data, cid)
|
return blocks.NewBlockWithCid(data, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) {
|
func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
||||||
st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
|
st, err := i.api.Block().Stat(ctx, path.IpldPath(cid))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, xerrors.Errorf("getting ipfs block: %w", err)
|
return 0, xerrors.Errorf("getting ipfs block: %w", err)
|
||||||
}
|
}
|
||||||
@ -122,23 +122,23 @@ func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) {
|
|||||||
return st.Size(), nil
|
return st.Size(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) Put(block blocks.Block) error {
|
func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error {
|
||||||
mhd, err := multihash.Decode(block.Cid().Hash())
|
mhd, err := multihash.Decode(block.Cid().Hash())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = i.api.Block().Put(i.ctx, bytes.NewReader(block.RawData()),
|
_, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()),
|
||||||
options.Block.Hash(mhd.Code, mhd.Length),
|
options.Block.Hash(mhd.Code, mhd.Length),
|
||||||
options.Block.Format(cid.CodecToStr[block.Cid().Type()]))
|
options.Block.Format(cid.CodecToStr[block.Cid().Type()]))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error {
|
func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
|
||||||
// TODO: could be done in parallel
|
// TODO: could be done in parallel
|
||||||
|
|
||||||
for _, block := range blocks {
|
for _, block := range blocks {
|
||||||
if err := i.Put(block); err != nil {
|
if err := i.Put(ctx, block); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
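One practical effect of switching the IPFS-backed store from its stored `i.ctx` to the per-call context is that remote reads can now be bounded or cancelled by the caller. A small illustrative helper (the interface is a stand-in, not the Lotus type):

```go
package example

import (
	"context"
	"time"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

type remoteGetter interface {
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
}

// getWithDeadline bounds a single remote read with a deadline derived from the
// caller's context — possible only now that the per-call context reaches the
// remote client instead of the long-lived context the store was built with.
func getWithDeadline(ctx context.Context, bs remoteGetter, c cid.Cid, timeout time.Duration) (blocks.Block, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return bs.Get(ctx, c)
}
```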
@ -15,24 +15,24 @@ func NewMemory() MemBlockstore {
|
|||||||
// MemBlockstore is a terminal blockstore that keeps blocks in memory.
|
// MemBlockstore is a terminal blockstore that keeps blocks in memory.
|
||||||
type MemBlockstore map[cid.Cid]blocks.Block
|
type MemBlockstore map[cid.Cid]blocks.Block
|
||||||
|
|
||||||
func (m MemBlockstore) DeleteBlock(k cid.Cid) error {
|
func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
|
||||||
delete(m, k)
|
delete(m, k)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m MemBlockstore) DeleteMany(ks []cid.Cid) error {
|
func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error {
|
||||||
for _, k := range ks {
|
for _, k := range ks {
|
||||||
delete(m, k)
|
delete(m, k)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m MemBlockstore) Has(k cid.Cid) (bool, error) {
|
func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
|
||||||
_, ok := m[k]
|
_, ok := m[k]
|
||||||
return ok, nil
|
return ok, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error {
|
func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
|
||||||
b, ok := m[k]
|
b, ok := m[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
return ErrNotFound
|
return ErrNotFound
|
||||||
@ -40,7 +40,7 @@ func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error {
|
|||||||
return callback(b.RawData())
|
return callback(b.RawData())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) {
|
func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
|
||||||
b, ok := m[k]
|
b, ok := m[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, ErrNotFound
|
return nil, ErrNotFound
|
||||||
@ -49,7 +49,7 @@ func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetSize returns the CIDs mapped BlockSize
|
// GetSize returns the CIDs mapped BlockSize
|
||||||
func (m MemBlockstore) GetSize(k cid.Cid) (int, error) {
|
func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
|
||||||
b, ok := m[k]
|
b, ok := m[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, ErrNotFound
|
return 0, ErrNotFound
|
||||||
@ -58,7 +58,7 @@ func (m MemBlockstore) GetSize(k cid.Cid) (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Put puts a given block to the underlying datastore
|
// Put puts a given block to the underlying datastore
|
||||||
func (m MemBlockstore) Put(b blocks.Block) error {
|
func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error {
|
||||||
// Convert to a basic block for safety, but try to reuse the existing
|
// Convert to a basic block for safety, but try to reuse the existing
|
||||||
// block if it's already a basic block.
|
// block if it's already a basic block.
|
||||||
k := b.Cid()
|
k := b.Cid()
|
||||||
@ -76,9 +76,9 @@ func (m MemBlockstore) Put(b blocks.Block) error {
|
|||||||
|
|
||||||
// PutMany puts a slice of blocks at the same time using batching
|
// PutMany puts a slice of blocks at the same time using batching
|
||||||
// capabilities of the underlying datastore whenever possible.
|
// capabilities of the underlying datastore whenever possible.
|
||||||
func (m MemBlockstore) PutMany(bs []blocks.Block) error {
|
func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
|
||||||
for _, b := range bs {
|
for _, b := range bs {
|
||||||
_ = m.Put(b) // can't fail
|
_ = m.Put(ctx, b) // can't fail
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
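Every store in this diff gets the same signature update, and an implementation that misses one method silently stops satisfying the interface at its use sites. The compile-time assertion idiom already used by the adapted blockstore above turns that into an immediate build error; a sketch with placeholder names:

```go
package example

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// Blockstore is a paraphrase of the context-aware method set used above.
type Blockstore interface {
	Put(ctx context.Context, blk blocks.Block) error
	Get(ctx context.Context, c cid.Cid) (blocks.Block, error)
	Has(ctx context.Context, c cid.Cid) (bool, error)
	DeleteBlock(ctx context.Context, c cid.Cid) error
}

type myStore struct{}

// The assertion fails to compile the moment any method signature lags behind
// the interface, instead of surfacing later as "does not implement" errors.
var _ Blockstore = (*myStore)(nil)

func (s *myStore) Put(ctx context.Context, blk blocks.Block) error          { return nil }
func (s *myStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { return nil, nil }
func (s *myStore) Has(ctx context.Context, c cid.Cid) (bool, error)         { return false, nil }
func (s *myStore) DeleteBlock(ctx context.Context, c cid.Cid) error         { return nil }
```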
@ -18,6 +18,8 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
bstore "github.com/filecoin-project/lotus/blockstore"
|
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/metrics"
|
"github.com/filecoin-project/lotus/metrics"
|
||||||
|
|
||||||
@ -47,6 +49,9 @@ var (
|
|||||||
enableDebugLog = false
|
enableDebugLog = false
|
||||||
// set this to true if you want to track origin stack traces in the write log
|
// set this to true if you want to track origin stack traces in the write log
|
||||||
enableDebugLogWriteTraces = false
|
enableDebugLogWriteTraces = false
|
||||||
|
|
||||||
|
// upgradeBoundary is the boundary before and after an upgrade where we suppress compaction
|
||||||
|
upgradeBoundary = build.Finality
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -98,6 +103,12 @@ type ChainAccessor interface {
|
|||||||
SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
|
SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// upgradeRange is a precomputed epoch range during which we shouldn't compact so as to not
|
||||||
|
// interfere with an upgrade
|
||||||
|
type upgradeRange struct {
|
||||||
|
start, end abi.ChainEpoch
|
||||||
|
}
|
||||||
|
|
||||||
// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
|
// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
|
||||||
// of the Blockstore interface with the traits we need for compaction.
|
// of the Blockstore interface with the traits we need for compaction.
|
||||||
type hotstore interface {
|
type hotstore interface {
|
||||||
@ -125,6 +136,8 @@ type SplitStore struct {
|
|||||||
cold bstore.Blockstore
|
cold bstore.Blockstore
|
||||||
hot hotstore
|
hot hotstore
|
||||||
|
|
||||||
|
upgrades []upgradeRange
|
||||||
|
|
||||||
markSetEnv MarkSetEnv
|
markSetEnv MarkSetEnv
|
||||||
markSetSize int64
|
markSetSize int64
|
||||||
|
|
||||||
@ -203,17 +216,17 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Blockstore interface
|
// Blockstore interface
|
||||||
func (s *SplitStore) DeleteBlock(_ cid.Cid) error {
|
func (s *SplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error {
|
||||||
// afaict we don't seem to be using this method, so it's not implemented
|
// afaict we don't seem to be using this method, so it's not implemented
|
||||||
return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
|
return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) DeleteMany(_ []cid.Cid) error {
|
func (s *SplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error {
|
||||||
// afaict we don't seem to be using this method, so it's not implemented
|
// afaict we don't seem to be using this method, so it's not implemented
|
||||||
return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
|
return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
|
func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
||||||
if isIdentiyCid(cid) {
|
if isIdentiyCid(cid) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
@ -221,7 +234,7 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
|
|||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
defer s.txnLk.RUnlock()
|
defer s.txnLk.RUnlock()
|
||||||
|
|
||||||
has, err := s.hot.Has(cid)
|
has, err := s.hot.Has(ctx, cid)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return has, err
|
return has, err
|
||||||
@ -232,10 +245,10 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.cold.Has(cid)
|
return s.cold.Has(ctx, cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
|
func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
|
||||||
if isIdentiyCid(cid) {
|
if isIdentiyCid(cid) {
|
||||||
data, err := decodeIdentityCid(cid)
|
data, err := decodeIdentityCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -248,7 +261,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
|
|||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
defer s.txnLk.RUnlock()
|
defer s.txnLk.RUnlock()
|
||||||
|
|
||||||
blk, err := s.hot.Get(cid)
|
blk, err := s.hot.Get(ctx, cid)
|
||||||
|
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
@ -260,7 +273,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
|
|||||||
s.debug.LogReadMiss(cid)
|
s.debug.LogReadMiss(cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
blk, err = s.cold.Get(cid)
|
blk, err = s.cold.Get(ctx, cid)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
||||||
|
|
||||||
@ -272,7 +285,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
|
func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
||||||
if isIdentiyCid(cid) {
|
if isIdentiyCid(cid) {
|
||||||
data, err := decodeIdentityCid(cid)
|
data, err := decodeIdentityCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -285,7 +298,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
|
|||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
defer s.txnLk.RUnlock()
|
defer s.txnLk.RUnlock()
|
||||||
|
|
||||||
size, err := s.hot.GetSize(cid)
|
size, err := s.hot.GetSize(ctx, cid)
|
||||||
|
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
@ -297,7 +310,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
|
|||||||
s.debug.LogReadMiss(cid)
|
s.debug.LogReadMiss(cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
size, err = s.cold.GetSize(cid)
|
size, err = s.cold.GetSize(ctx, cid)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
||||||
}
|
}
|
||||||
@ -308,7 +321,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) Put(blk blocks.Block) error {
|
func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
|
||||||
if isIdentiyCid(blk.Cid()) {
|
if isIdentiyCid(blk.Cid()) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -316,7 +329,7 @@ func (s *SplitStore) Put(blk blocks.Block) error {
|
|||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
defer s.txnLk.RUnlock()
|
defer s.txnLk.RUnlock()
|
||||||
|
|
||||||
err := s.hot.Put(blk)
|
err := s.hot.Put(ctx, blk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -327,7 +340,7 @@ func (s *SplitStore) Put(blk blocks.Block) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) PutMany(blks []blocks.Block) error {
|
func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
||||||
// filter identities
|
// filter identities
|
||||||
idcids := 0
|
idcids := 0
|
||||||
for _, blk := range blks {
|
for _, blk := range blks {
|
||||||
@ -361,7 +374,7 @@ func (s *SplitStore) PutMany(blks []blocks.Block) error {
|
|||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
defer s.txnLk.RUnlock()
|
defer s.txnLk.RUnlock()
|
||||||
|
|
||||||
err := s.hot.PutMany(blks)
|
err := s.hot.PutMany(ctx, blks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -417,7 +430,7 @@ func (s *SplitStore) HashOnRead(enabled bool) {
|
|||||||
s.cold.HashOnRead(enabled)
|
s.cold.HashOnRead(enabled)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
|
func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error {
|
||||||
if isIdentiyCid(cid) {
|
if isIdentiyCid(cid) {
|
||||||
data, err := decodeIdentityCid(cid)
|
data, err := decodeIdentityCid(cid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -438,14 +451,14 @@ func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
|
|||||||
s.protectView(cid)
|
s.protectView(cid)
|
||||||
defer s.viewDone()
|
defer s.viewDone()
|
||||||
|
|
||||||
err := s.hot.View(cid, cb)
|
err := s.hot.View(ctx, cid, cb)
|
||||||
switch err {
|
switch err {
|
||||||
case bstore.ErrNotFound:
|
case bstore.ErrNotFound:
|
||||||
if s.isWarm() {
|
if s.isWarm() {
|
||||||
s.debug.LogReadMiss(cid)
|
s.debug.LogReadMiss(cid)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s.cold.View(cid, cb)
|
err = s.cold.View(ctx, cid, cb)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
||||||
}
|
}
|
||||||
@ -463,16 +476,33 @@ func (s *SplitStore) isWarm() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// State tracking
|
// State tracking
|
||||||
func (s *SplitStore) Start(chain ChainAccessor) error {
|
func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error {
|
||||||
s.chain = chain
|
s.chain = chain
|
||||||
curTs := chain.GetHeaviestTipSet()
|
curTs := chain.GetHeaviestTipSet()
|
||||||
|
|
||||||
|
// precompute the upgrade boundaries
|
||||||
|
s.upgrades = make([]upgradeRange, 0, len(us))
|
||||||
|
for _, upgrade := range us {
|
||||||
|
boundary := upgrade.Height
|
||||||
|
for _, pre := range upgrade.PreMigrations {
|
||||||
|
preMigrationBoundary := upgrade.Height - pre.StartWithin
|
||||||
|
if preMigrationBoundary < boundary {
|
||||||
|
boundary = preMigrationBoundary
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
upgradeStart := boundary - upgradeBoundary
|
||||||
|
upgradeEnd := upgrade.Height + upgradeBoundary
|
||||||
|
|
||||||
|
s.upgrades = append(s.upgrades, upgradeRange{start: upgradeStart, end: upgradeEnd})
|
||||||
|
}
|
||||||
|
|
||||||
// should we warmup
|
// should we warmup
|
||||||
warmup := false
|
warmup := false
|
||||||
|
|
||||||
// load base epoch from metadata ds
|
// load base epoch from metadata ds
|
||||||
// if none, then use current epoch because it's a fresh start
|
// if none, then use current epoch because it's a fresh start
|
||||||
bs, err := s.ds.Get(baseEpochKey)
|
bs, err := s.ds.Get(s.ctx, baseEpochKey)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
s.baseEpoch = bytesToEpoch(bs)
|
s.baseEpoch = bytesToEpoch(bs)
|
||||||
@ -493,7 +523,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// load warmup epoch from metadata ds
|
// load warmup epoch from metadata ds
|
||||||
bs, err = s.ds.Get(warmupEpochKey)
|
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
s.warmupEpoch = bytesToEpoch(bs)
|
s.warmupEpoch = bytesToEpoch(bs)
|
||||||
@ -506,7 +536,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// load markSetSize from metadata ds to provide a size hint for marksets
|
// load markSetSize from metadata ds to provide a size hint for marksets
|
||||||
bs, err = s.ds.Get(markSetSizeKey)
|
bs, err = s.ds.Get(s.ctx, markSetSizeKey)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
s.markSetSize = bytesToInt64(bs)
|
s.markSetSize = bytesToInt64(bs)
|
||||||
@ -517,7 +547,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
|
// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
|
||||||
bs, err = s.ds.Get(compactionIndexKey)
|
bs, err = s.ds.Get(s.ctx, compactionIndexKey)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
s.compactionIndex = bytesToInt64(bs)
|
s.compactionIndex = bytesToInt64(bs)
|
||||||
@ -579,5 +609,5 @@ func (s *SplitStore) checkClosing() error {
|
|||||||
|
|
||||||
func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
|
func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
|
||||||
s.baseEpoch = epoch
|
s.baseEpoch = epoch
|
||||||
return s.ds.Put(baseEpochKey, epochToBytes(epoch))
|
return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch))
|
||||||
}
|
}
|
||||||
|
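Beyond the context plumbing, the splitstore changes above add upgrade awareness: `Start` now precomputes, for each scheduled network upgrade, a window that opens `upgradeBoundary` epochs before the earliest point a pre-migration may begin and closes `upgradeBoundary` epochs after the upgrade height, and compaction is suppressed while the head sits inside any such window. A standalone sketch of both halves with plain integer epochs (the type names are illustrative, not the stmgr definitions):

```go
package example

// upgrade and upgradeRange are illustrative stand-ins for a schedule entry and
// the precomputed suppression window the SplitStore keeps.
type upgrade struct {
	Height      int64
	StartWithin []int64 // pre-migration lead times, in epochs before Height
}

type upgradeRange struct{ start, end int64 }

// precomputeUpgradeRanges mirrors the logic added to SplitStore.Start: the
// window opens `boundary` epochs before the earliest pre-migration start and
// closes `boundary` epochs after the upgrade epoch itself.
func precomputeUpgradeRanges(schedule []upgrade, boundary int64) []upgradeRange {
	ranges := make([]upgradeRange, 0, len(schedule))
	for _, u := range schedule {
		earliest := u.Height
		for _, lead := range u.StartWithin {
			if pre := u.Height - lead; pre < earliest {
				earliest = pre
			}
		}
		ranges = append(ranges, upgradeRange{start: earliest - boundary, end: u.Height + boundary})
	}
	return ranges
}

// isNearUpgrade is the guard HeadChange consults before compacting: any epoch
// inside a window vetoes compaction for that head change.
func isNearUpgrade(epoch int64, ranges []upgradeRange) bool {
	for _, r := range ranges {
		if epoch >= r.start && epoch <= r.end {
			return true
		}
	}
	return false
}
```

Suppressing compaction around upgrades avoids racing the state migration with a hot-store walk, at the cost of letting the hot store grow slightly past the usual threshold for a bounded number of epochs.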
@ -96,7 +96,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
|||||||
return errStopWalk
|
return errStopWalk
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err := s.hot.Has(c)
|
has, err := s.hot.Has(s.ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error checking hotstore: %w", err)
|
return xerrors.Errorf("error checking hotstore: %w", err)
|
||||||
}
|
}
|
||||||
@ -105,7 +105,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err = s.cold.Has(c)
|
has, err = s.cold.Has(s.ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error checking coldstore: %w", err)
|
return xerrors.Errorf("error checking coldstore: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -99,6 +99,12 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s.isNearUpgrade(epoch) {
|
||||||
|
// we are near an upgrade epoch, suppress compaction
|
||||||
|
atomic.StoreInt32(&s.compacting, 0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if epoch-s.baseEpoch > CompactionThreshold {
|
if epoch-s.baseEpoch > CompactionThreshold {
|
||||||
// it's time to compact -- prepare the transaction and go!
|
// it's time to compact -- prepare the transaction and go!
|
||||||
s.beginTxnProtect()
|
s.beginTxnProtect()
|
||||||
@ -121,6 +127,16 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool {
|
||||||
|
for _, upgrade := range s.upgrades {
|
||||||
|
if epoch >= upgrade.start && epoch <= upgrade.end {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// transactionally protect incoming tipsets
|
// transactionally protect incoming tipsets
|
||||||
func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
|
func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
|
||||||
s.txnLk.RLock()
|
s.txnLk.RLock()
|
||||||
@ -561,13 +577,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
|||||||
return xerrors.Errorf("error saving base epoch: %w", err)
|
return xerrors.Errorf("error saving base epoch: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
|
err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error saving mark set size: %w", err)
|
return xerrors.Errorf("error saving mark set size: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.compactionIndex++
|
s.compactionIndex++
|
||||||
err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
|
err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error saving compaction index: %w", err)
|
return xerrors.Errorf("error saving compaction index: %w", err)
|
||||||
}
|
}
|
||||||
@ -819,10 +835,10 @@ func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
|
|||||||
return cb(data)
|
return cb(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := s.hot.View(c, cb)
|
err := s.hot.View(s.ctx, c, cb)
|
||||||
switch err {
|
switch err {
|
||||||
case bstore.ErrNotFound:
|
case bstore.ErrNotFound:
|
||||||
return s.cold.View(c, cb)
|
return s.cold.View(s.ctx, c, cb)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return err
|
return err
|
||||||
@ -834,13 +850,13 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err := s.hot.Has(c)
|
has, err := s.hot.Has(s.ctx, c)
|
||||||
|
|
||||||
if has || err != nil {
|
if has || err != nil {
|
||||||
return has, err
|
return has, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.cold.Has(c)
|
return s.cold.Has(s.ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
|
func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
|
||||||
@ -851,7 +867,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
blk, err := s.hot.Get(c)
|
blk, err := s.hot.Get(s.ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == bstore.ErrNotFound {
|
if err == bstore.ErrNotFound {
|
||||||
log.Warnf("hotstore missing block %s", c)
|
log.Warnf("hotstore missing block %s", c)
|
||||||
@ -863,7 +879,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
|
|||||||
|
|
||||||
batch = append(batch, blk)
|
batch = append(batch, blk)
|
||||||
if len(batch) == batchSize {
|
if len(batch) == batchSize {
|
||||||
err = s.cold.PutMany(batch)
|
err = s.cold.PutMany(s.ctx, batch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error putting batch to coldstore: %w", err)
|
return xerrors.Errorf("error putting batch to coldstore: %w", err)
|
||||||
}
|
}
|
||||||
@ -872,7 +888,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(batch) > 0 {
|
if len(batch) > 0 {
|
||||||
err := s.cold.PutMany(batch)
|
err := s.cold.PutMany(s.ctx, batch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error putting batch to coldstore: %w", err)
|
return xerrors.Errorf("error putting batch to coldstore: %w", err)
|
||||||
}
|
}
|
||||||
@ -1042,7 +1058,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
|
|||||||
deadCids = append(deadCids, c)
|
deadCids = append(deadCids, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := s.hot.DeleteMany(deadCids)
|
err := s.hot.DeleteMany(s.ctx, deadCids)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error purging cold objects: %w", err)
|
return xerrors.Errorf("error purging cold objects: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -20,28 +20,28 @@ func (s *SplitStore) Expose() bstore.Blockstore {
|
|||||||
return &exposedSplitStore{s: s}
|
return &exposedSplitStore{s: s}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error {
|
func (es *exposedSplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error {
|
||||||
return errors.New("DeleteBlock: operation not supported")
|
return errors.New("DeleteBlock: operation not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error {
|
func (es *exposedSplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error {
|
||||||
return errors.New("DeleteMany: operation not supported")
|
return errors.New("DeleteMany: operation not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) {
|
func (es *exposedSplitStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
|
||||||
if isIdentiyCid(c) {
|
if isIdentiyCid(c) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err := es.s.hot.Has(c)
|
has, err := es.s.hot.Has(ctx, c)
|
||||||
if has || err != nil {
|
if has || err != nil {
|
||||||
return has, err
|
return has, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return es.s.cold.Has(c)
|
return es.s.cold.Has(ctx, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) {
|
func (es *exposedSplitStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
|
||||||
if isIdentiyCid(c) {
|
if isIdentiyCid(c) {
|
||||||
data, err := decodeIdentityCid(c)
|
data, err := decodeIdentityCid(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -51,16 +51,16 @@ func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) {
|
|||||||
return blocks.NewBlockWithCid(data, c)
|
return blocks.NewBlockWithCid(data, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
blk, err := es.s.hot.Get(c)
|
blk, err := es.s.hot.Get(ctx, c)
|
||||||
switch err {
|
switch err {
|
||||||
case bstore.ErrNotFound:
|
case bstore.ErrNotFound:
|
||||||
return es.s.cold.Get(c)
|
return es.s.cold.Get(ctx, c)
|
||||||
default:
|
default:
|
||||||
return blk, err
|
return blk, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) {
|
func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||||
if isIdentiyCid(c) {
|
if isIdentiyCid(c) {
|
||||||
data, err := decodeIdentityCid(c)
|
data, err := decodeIdentityCid(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -70,21 +70,21 @@ func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) {
|
|||||||
return len(data), nil
|
return len(data), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
size, err := es.s.hot.GetSize(c)
|
size, err := es.s.hot.GetSize(ctx, c)
|
||||||
switch err {
|
switch err {
|
||||||
case bstore.ErrNotFound:
|
case bstore.ErrNotFound:
|
||||||
return es.s.cold.GetSize(c)
|
return es.s.cold.GetSize(ctx, c)
|
||||||
default:
|
default:
|
||||||
return size, err
|
return size, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) Put(blk blocks.Block) error {
|
func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error {
|
||||||
return es.s.Put(blk)
|
return es.s.Put(ctx, blk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) PutMany(blks []blocks.Block) error {
|
func (es *exposedSplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
||||||
return es.s.PutMany(blks)
|
return es.s.PutMany(ctx, blks)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
||||||
@ -93,7 +93,7 @@ func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, e
|
|||||||
|
|
||||||
func (es *exposedSplitStore) HashOnRead(enabled bool) {}
|
func (es *exposedSplitStore) HashOnRead(enabled bool) {}
|
||||||
|
|
||||||
func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error {
|
func (es *exposedSplitStore) View(ctx context.Context, c cid.Cid, f func([]byte) error) error {
|
||||||
if isIdentiyCid(c) {
|
if isIdentiyCid(c) {
|
||||||
data, err := decodeIdentityCid(c)
|
data, err := decodeIdentityCid(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -103,10 +103,10 @@ func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error {
|
|||||||
return f(data)
|
return f(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := es.s.hot.View(c, f)
|
err := es.s.hot.View(ctx, c, f)
|
||||||
switch err {
|
switch err {
|
||||||
case bstore.ErrNotFound:
|
case bstore.ErrNotFound:
|
||||||
return es.s.cold.View(c, f)
|
return es.s.cold.View(ctx, c, f)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return err
|
return err
|
||||||
|
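The exposed splitstore's `View` keeps the hot-then-cold dispatch, now with the caller's context; the callback runs against the store's own buffer, which is why `View` exists as a zero-copy alternative to `Get`. A sketch of that dispatch with an illustrative not-found sentinel:

```go
package example

import (
	"context"
	"errors"

	cid "github.com/ipfs/go-cid"
)

// errNotFound stands in for the blockstore's not-found sentinel.
var errNotFound = errors.New("blockstore: block not found")

// viewer is the zero-copy read trait: the callback sees the store's internal
// buffer and must not retain the slice after returning.
type viewer interface {
	View(ctx context.Context, c cid.Cid, cb func([]byte) error) error
}

// viewHotCold tries the hot store and only consults the cold store when the
// hot store reports not-found; any other outcome (success or a real error)
// is returned as-is.
func viewHotCold(ctx context.Context, hot, cold viewer, c cid.Cid, cb func([]byte) error) error {
	err := hot.View(ctx, c, cb)
	if errors.Is(err, errNotFound) {
		return cold.View(ctx, c, cb)
	}
	return err
}
```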
@ -11,6 +11,7 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/blockstore"
|
"github.com/filecoin-project/lotus/blockstore"
|
||||||
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
|
|
||||||
@ -29,6 +30,7 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testSplitStore(t *testing.T, cfg *Config) {
|
func testSplitStore(t *testing.T, cfg *Config) {
|
||||||
|
ctx := context.Background()
|
||||||
chain := &mockChain{t: t}
|
chain := &mockChain{t: t}
|
||||||
|
|
||||||
// the myriads of stores
|
// the myriads of stores
|
||||||
@ -38,7 +40,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
|
|
||||||
// this is necessary to avoid the garbage mock puts in the blocks
|
// this is necessary to avoid the garbage mock puts in the blocks
|
||||||
garbage := blocks.NewBlock([]byte{1, 2, 3})
|
garbage := blocks.NewBlock([]byte{1, 2, 3})
|
||||||
err := cold.Put(garbage)
|
err := cold.Put(ctx, garbage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -59,21 +61,21 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = cold.Put(blk)
|
err = cold.Put(ctx, blk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// create a garbage block that is protected with a registered protector
|
// create a garbage block that is protected with a registered protector
|
||||||
protected := blocks.NewBlock([]byte("protected!"))
|
protected := blocks.NewBlock([]byte("protected!"))
|
||||||
err = hot.Put(protected)
|
err = hot.Put(ctx, protected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// and another one that is not protected
|
// and another one that is not protected
|
||||||
unprotected := blocks.NewBlock([]byte("unprotected!"))
|
unprotected := blocks.NewBlock([]byte("unprotected!"))
|
||||||
err = hot.Put(unprotected)
|
err = hot.Put(ctx, unprotected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -90,7 +92,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
return protect(protected.Cid())
|
return protect(protected.Cid())
|
||||||
})
|
})
|
||||||
|
|
||||||
err = ss.Start(chain)
|
err = ss.Start(chain, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -108,11 +110,11 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
err = ss.Put(stateRoot)
|
err = ss.Put(ctx, stateRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
err = ss.Put(sblk)
|
err = ss.Put(ctx, sblk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -175,7 +177,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ensure our protected block is still there
|
// ensure our protected block is still there
|
||||||
has, err := hot.Has(protected.Cid())
|
has, err := hot.Has(ctx, protected.Cid())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -185,7 +187,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ensure our unprotected block is in the coldstore now
|
// ensure our unprotected block is in the coldstore now
|
||||||
has, err = hot.Has(unprotected.Cid())
|
has, err = hot.Has(ctx, unprotected.Cid())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -194,7 +196,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
|||||||
t.Fatal("unprotected block is still in hotstore")
|
t.Fatal("unprotected block is still in hotstore")
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err = cold.Has(unprotected.Cid())
|
has, err = cold.Has(ctx, unprotected.Cid())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -220,6 +222,141 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) {
 	testSplitStore(t, &Config{MarkSetType: "badger"})
 }

+func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
+	ctx := context.Background()
+	chain := &mockChain{t: t}
+
+	// the myriads of stores
+	ds := dssync.MutexWrap(datastore.NewMapDatastore())
+	hot := newMockStore()
+	cold := newMockStore()
+
+	// this is necessary to avoid the garbage mock puts in the blocks
+	garbage := blocks.NewBlock([]byte{1, 2, 3})
+	err := cold.Put(ctx, garbage)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// genesis
+	genBlock := mock.MkBlock(nil, 0, 0)
+	genBlock.Messages = garbage.Cid()
+	genBlock.ParentMessageReceipts = garbage.Cid()
+	genBlock.ParentStateRoot = garbage.Cid()
+	genBlock.Timestamp = uint64(time.Now().Unix())
+
+	genTs := mock.TipSet(genBlock)
+	chain.push(genTs)
+
+	// put the genesis block to cold store
+	blk, err := genBlock.ToStorageBlock()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = cold.Put(ctx, blk)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// open the splitstore
+	ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ss.Close() //nolint
+
+	// create an upgrade schedule that will suppress compaction during the test
+	upgradeBoundary = 0
+	upgrade := stmgr.Upgrade{
+		Height:        10,
+		PreMigrations: []stmgr.PreMigration{{StartWithin: 10}},
+	}
+
+	err = ss.Start(chain, []stmgr.Upgrade{upgrade})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet {
+		blk := mock.MkBlock(curTs, uint64(i), uint64(i))
+
+		blk.Messages = garbage.Cid()
+		blk.ParentMessageReceipts = garbage.Cid()
+		blk.ParentStateRoot = stateRoot.Cid()
+		blk.Timestamp = uint64(time.Now().Unix())
+
+		sblk, err := blk.ToStorageBlock()
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = ss.Put(ctx, stateRoot)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = ss.Put(ctx, sblk)
+		if err != nil {
+			t.Fatal(err)
+		}
+		ts := mock.TipSet(blk)
+		chain.push(ts)
+
+		return ts
+	}
+
+	waitForCompaction := func() {
+		for atomic.LoadInt32(&ss.compacting) == 1 {
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	curTs := genTs
+	for i := 1; i < 10; i++ {
+		stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
+		curTs = mkBlock(curTs, i, stateRoot)
+		waitForCompaction()
+	}
+
+	countBlocks := func(bs blockstore.Blockstore) int {
+		count := 0
+		_ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error {
+			count++
+			return nil
+		})
+		return count
+	}
+
+	// we should not have compacted due to suppression and everything should still be hot
+	hotCnt := countBlocks(hot)
+	coldCnt := countBlocks(cold)
+
+	if hotCnt != 20 {
+		t.Errorf("expected %d blocks, but got %d", 20, hotCnt)
+	}
+
+	if coldCnt != 2 {
+		t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
+	}
+
+	// put some more blocks, now we should compact
+	for i := 10; i < 20; i++ {
+		stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
+		curTs = mkBlock(curTs, i, stateRoot)
+		waitForCompaction()
+	}
+
+	hotCnt = countBlocks(hot)
+	coldCnt = countBlocks(cold)
+
+	if hotCnt != 24 {
+		t.Errorf("expected %d blocks, but got %d", 24, hotCnt)
+	}
+
+	if coldCnt != 18 {
+		t.Errorf("expected %d blocks, but got %d", 18, coldCnt)
+	}
+}

 type mockChain struct {
 	t testing.TB

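Aside (illustration, not part of the commit): the new TestSplitStoreSuppressCompactionNearUpgrade above exercises the idea that splitstore compaction should be deferred while the chain is inside the pre-migration window of a scheduled upgrade. The sketch below only illustrates that window check under assumed, simplified types (Upgrade, StartWithin, boundary are stand-ins); it is not the lotus implementation.

package main

import "fmt"

// Upgrade is a simplified stand-in for the state manager's upgrade schedule
// entry; StartWithin is the pre-migration lead time in epochs (assumed names).
type Upgrade struct {
	Height      int64
	StartWithin int64
}

// suppressCompaction reports whether a compaction started at epoch cur would
// overlap the pre-migration window of any scheduled upgrade: the window opens
// StartWithin epochs before the upgrade and closes boundary epochs after it.
func suppressCompaction(cur, boundary int64, upgrades []Upgrade) bool {
	for _, u := range upgrades {
		if cur >= u.Height-u.StartWithin && cur <= u.Height+boundary {
			return true
		}
	}
	return false
}

func main() {
	upgrades := []Upgrade{{Height: 10, StartWithin: 10}}
	fmt.Println(suppressCompaction(5, 0, upgrades))  // true: inside the window, defer compaction
	fmt.Println(suppressCompaction(25, 0, upgrades)) // false: safely past the upgrade
}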
@@ -296,7 +433,7 @@ func newMockStore() *mockStore {
 	return &mockStore{set: make(map[cid.Cid]blocks.Block)}
 }

-func (b *mockStore) Has(cid cid.Cid) (bool, error) {
+func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) {
 	b.mx.Lock()
 	defer b.mx.Unlock()
 	_, ok := b.set[cid]
@@ -305,7 +442,7 @@ func (b *mockStore) Has(cid cid.Cid) (bool, error) {

 func (b *mockStore) HashOnRead(hor bool) {}

-func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) {
+func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
 	b.mx.Lock()
 	defer b.mx.Unlock()

@@ -316,8 +453,8 @@ func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) {
 	return blk, nil
 }

-func (b *mockStore) GetSize(cid cid.Cid) (int, error) {
-	blk, err := b.Get(cid)
+func (b *mockStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
+	blk, err := b.Get(ctx, cid)
 	if err != nil {
 		return 0, err
 	}
@@ -325,15 +462,15 @@ func (b *mockStore) GetSize(cid cid.Cid) (int, error) {
 	return len(blk.RawData()), nil
 }

-func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error {
-	blk, err := b.Get(cid)
+func (b *mockStore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error {
+	blk, err := b.Get(ctx, cid)
 	if err != nil {
 		return err
 	}
 	return f(blk.RawData())
 }

-func (b *mockStore) Put(blk blocks.Block) error {
+func (b *mockStore) Put(_ context.Context, blk blocks.Block) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

@@ -341,7 +478,7 @@ func (b *mockStore) Put(blk blocks.Block) error {
 	return nil
 }

-func (b *mockStore) PutMany(blks []blocks.Block) error {
+func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

@@ -351,7 +488,7 @@ func (b *mockStore) PutMany(blks []blocks.Block) error {
 	return nil
 }

-func (b *mockStore) DeleteBlock(cid cid.Cid) error {
+func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

@@ -359,7 +496,7 @@ func (b *mockStore) DeleteBlock(cid cid.Cid) error {
 	return nil
 }

-func (b *mockStore) DeleteMany(cids []cid.Cid) error {
+func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

@@ -75,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {

 		count++

-		has, err := s.hot.Has(c)
+		has, err := s.hot.Has(s.ctx, c)
 		if err != nil {
 			return err
 		}
@@ -84,7 +84,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 			return nil
 		}

-		blk, err := s.cold.Get(c)
+		blk, err := s.cold.Get(s.ctx, c)
 		if err != nil {
 			if err == bstore.ErrNotFound {
 				missing++
@@ -97,7 +97,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {

 		batchHot = append(batchHot, blk)
 		if len(batchHot) == batchSize {
-			err = s.hot.PutMany(batchHot)
+			err = s.hot.PutMany(s.ctx, batchHot)
 			if err != nil {
 				return err
 			}
@@ -112,7 +112,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 	}

 	if len(batchHot) > 0 {
-		err = s.hot.PutMany(batchHot)
+		err = s.hot.PutMany(s.ctx, batchHot)
 		if err != nil {
 			return err
 		}
@@ -121,13 +121,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 	log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)

 	s.markSetSize = count + count>>2 // overestimate a bit
-	err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
+	err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
 	if err != nil {
 		log.Warnf("error saving mark set size: %s", err)
 	}

 	// save the warmup epoch
-	err = s.ds.Put(warmupEpochKey, epochToBytes(epoch))
+	err = s.ds.Put(s.ctx, warmupEpochKey, epochToBytes(epoch))
 	if err != nil {
 		return xerrors.Errorf("error saving warm up epoch: %w", err)
 	}
@@ -136,7 +136,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 	s.mx.Unlock()

 	// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
-	err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
+	err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex))
 	if err != nil {
 		return xerrors.Errorf("error saving compaction index: %w", err)
 	}
@@ -20,53 +20,53 @@ type SyncBlockstore struct {
 	bs MemBlockstore // specifically use a memStore to save indirection overhead.
 }

-func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error {
+func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	return m.bs.DeleteBlock(k)
+	return m.bs.DeleteBlock(ctx, k)
 }

-func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error {
+func (m *SyncBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	return m.bs.DeleteMany(ks)
+	return m.bs.DeleteMany(ctx, ks)
 }

-func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) {
+func (m *SyncBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
-	return m.bs.Has(k)
+	return m.bs.Has(ctx, k)
 }

-func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+func (m *SyncBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
 	m.mu.RLock()
 	defer m.mu.RUnlock()

-	return m.bs.View(k, callback)
+	return m.bs.View(ctx, k, callback)
 }

-func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) {
+func (m *SyncBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
-	return m.bs.Get(k)
+	return m.bs.Get(ctx, k)
 }

-func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) {
+func (m *SyncBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
-	return m.bs.GetSize(k)
+	return m.bs.GetSize(ctx, k)
 }

-func (m *SyncBlockstore) Put(b blocks.Block) error {
+func (m *SyncBlockstore) Put(ctx context.Context, b blocks.Block) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	return m.bs.Put(b)
+	return m.bs.Put(ctx, b)
 }

-func (m *SyncBlockstore) PutMany(bs []blocks.Block) error {
+func (m *SyncBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	return m.bs.PutMany(bs)
+	return m.bs.PutMany(ctx, bs)
 }

 func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
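Aside (illustration, not part of the commit): the SyncBlockstore changes above are one instance of the API migration this commit makes everywhere — every blockstore accessor now takes a context.Context as its first argument, and wrappers simply forward it. The sketch below shows that pattern with a toy interface of my own (Blockstore/memStore here are stand-ins, not the real lotus types), so callers can propagate timeouts and cancellation through storage calls.

package main

import (
	"context"
	"fmt"
	"time"
)

// Blockstore is a trimmed-down, assumed interface: the point is only that the
// context travels as the first parameter of every method.
type Blockstore interface {
	Put(ctx context.Context, key string, data []byte) error
	Get(ctx context.Context, key string) ([]byte, error)
}

type memStore map[string][]byte

func (m memStore) Put(ctx context.Context, key string, data []byte) error {
	if err := ctx.Err(); err != nil { // honor cancellation before doing work
		return err
	}
	m[key] = data
	return nil
}

func (m memStore) Get(ctx context.Context, key string) ([]byte, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return m[key], nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var bs Blockstore = memStore{}
	_ = bs.Put(ctx, "k", []byte("v"))
	v, _ := bs.Get(ctx, "k")
	fmt.Println(string(v))
}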
@@ -92,28 +92,28 @@ func (t *TimedCacheBlockstore) rotate() {
 	t.mu.Unlock()
 }

-func (t *TimedCacheBlockstore) Put(b blocks.Block) error {
+func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error {
 	// Don't check the inactive set here. We want to keep this block for at
 	// least one interval.
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return t.active.Put(b)
+	return t.active.Put(ctx, b)
 }

-func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error {
+func (t *TimedCacheBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return t.active.PutMany(bs)
+	return t.active.PutMany(ctx, bs)
 }

-func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+func (t *TimedCacheBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
 	// The underlying blockstore is always a "mem" blockstore so there's no difference,
 	// from a performance perspective, between view & get. So we call Get to avoid
 	// calling an arbitrary callback while holding a lock.
 	t.mu.RLock()
-	block, err := t.active.Get(k)
+	block, err := t.active.Get(ctx, k)
 	if err == ErrNotFound {
-		block, err = t.inactive.Get(k)
+		block, err = t.inactive.Get(ctx, k)
 	}
 	t.mu.RUnlock()

@@ -123,51 +123,51 @@ func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) erro
 	return callback(block.RawData())
 }

-func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) {
+func (t *TimedCacheBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
-	b, err := t.active.Get(k)
+	b, err := t.active.Get(ctx, k)
 	if err == ErrNotFound {
-		b, err = t.inactive.Get(k)
+		b, err = t.inactive.Get(ctx, k)
 	}
 	return b, err
 }

-func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) {
+func (t *TimedCacheBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
-	size, err := t.active.GetSize(k)
+	size, err := t.active.GetSize(ctx, k)
 	if err == ErrNotFound {
-		size, err = t.inactive.GetSize(k)
+		size, err = t.inactive.GetSize(ctx, k)
 	}
 	return size, err
 }

-func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) {
+func (t *TimedCacheBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
-	if has, err := t.active.Has(k); err != nil {
+	if has, err := t.active.Has(ctx, k); err != nil {
 		return false, err
 	} else if has {
 		return true, nil
 	}
-	return t.inactive.Has(k)
+	return t.inactive.Has(ctx, k)
 }

 func (t *TimedCacheBlockstore) HashOnRead(_ bool) {
 	// no-op
 }

-func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error {
+func (t *TimedCacheBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k))
+	return multierr.Combine(t.active.DeleteBlock(ctx, k), t.inactive.DeleteBlock(ctx, k))
 }

-func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error {
+func (t *TimedCacheBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks))
+	return multierr.Combine(t.active.DeleteMany(ctx, ks), t.inactive.DeleteMany(ctx, ks))
 }

 func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) {
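Aside (illustration, not part of the commit): TimedCacheBlockstore, whose methods are migrated above, keeps two generations (active and inactive) and drops the inactive one on each rotation, so a block survives roughly one to two intervals unless re-put. The toy sketch below shows only that aging idea with invented names; the real type additionally locks, tracks a clock, and implements the full blockstore interface.

package main

import "fmt"

// timedCache is an assumed, simplified two-generation cache: writes go to the
// active map, reads consult both, and rotate() discards the inactive map.
type timedCache struct {
	active, inactive map[string][]byte
}

func newTimedCache() *timedCache {
	return &timedCache{active: map[string][]byte{}, inactive: map[string][]byte{}}
}

func (c *timedCache) put(k string, v []byte) { c.active[k] = v }

func (c *timedCache) get(k string) ([]byte, bool) {
	if v, ok := c.active[k]; ok {
		return v, true
	}
	v, ok := c.inactive[k]
	return v, ok
}

// rotate ages the cache: entries not re-put within one interval are evicted
// on the second rotation.
func (c *timedCache) rotate() {
	c.inactive = c.active
	c.active = map[string][]byte{}
}

func main() {
	c := newTimedCache()
	c.put("a", []byte("1"))
	c.rotate()
	_, ok := c.get("a") // still served from the inactive generation
	fmt.Println(ok)
	c.rotate()
	_, ok = c.get("a") // gone after two rotations
	fmt.Println(ok)
}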
@@ -19,6 +19,8 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) {
 	tc.clock = mClock
 	tc.doneRotatingCh = make(chan struct{})

+	ctx := context.Background()
+
 	_ = tc.Start(context.Background())
 	mClock.Add(1) // IDK why it is needed but it makes it work

@@ -27,18 +29,18 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) {
 	}()

 	b1 := blocks.NewBlock([]byte("foo"))
-	require.NoError(t, tc.Put(b1))
+	require.NoError(t, tc.Put(ctx, b1))

 	b2 := blocks.NewBlock([]byte("bar"))
-	require.NoError(t, tc.Put(b2))
+	require.NoError(t, tc.Put(ctx, b2))

 	b3 := blocks.NewBlock([]byte("baz"))

-	b1out, err := tc.Get(b1.Cid())
+	b1out, err := tc.Get(ctx, b1.Cid())
 	require.NoError(t, err)
 	require.Equal(t, b1.RawData(), b1out.RawData())

-	has, err := tc.Has(b1.Cid())
+	has, err := tc.Has(ctx, b1.Cid())
 	require.NoError(t, err)
 	require.True(t, has)

@@ -46,17 +48,17 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) {
 	<-tc.doneRotatingCh

 	// We should still have everything.
-	has, err = tc.Has(b1.Cid())
+	has, err = tc.Has(ctx, b1.Cid())
 	require.NoError(t, err)
 	require.True(t, has)

-	has, err = tc.Has(b2.Cid())
+	has, err = tc.Has(ctx, b2.Cid())
 	require.NoError(t, err)
 	require.True(t, has)

 	// extend b2, add b3.
-	require.NoError(t, tc.Put(b2))
-	require.NoError(t, tc.Put(b3))
+	require.NoError(t, tc.Put(ctx, b2))
+	require.NoError(t, tc.Put(ctx, b3))

 	// all keys once.
 	allKeys, err := tc.AllKeysChan(context.Background())
@@ -71,15 +73,15 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) {
 	<-tc.doneRotatingCh
 	// should still have b2, and b3, but not b1

-	has, err = tc.Has(b1.Cid())
+	has, err = tc.Has(ctx, b1.Cid())
 	require.NoError(t, err)
 	require.False(t, has)

-	has, err = tc.Has(b2.Cid())
+	has, err = tc.Has(ctx, b2.Cid())
 	require.NoError(t, err)
 	require.True(t, has)

-	has, err = tc.Has(b3.Cid())
+	has, err = tc.Has(ctx, b3.Cid())
 	require.NoError(t, err)
 	require.True(t, has)
 }
@@ -19,72 +19,72 @@ func Union(stores ...Blockstore) Blockstore {
 	return unionBlockstore(stores)
 }

-func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) {
+func (m unionBlockstore) Has(ctx context.Context, cid cid.Cid) (has bool, err error) {
 	for _, bs := range m {
-		if has, err = bs.Has(cid); has || err != nil {
+		if has, err = bs.Has(ctx, cid); has || err != nil {
 			break
 		}
 	}
 	return has, err
 }

-func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) {
+func (m unionBlockstore) Get(ctx context.Context, cid cid.Cid) (blk blocks.Block, err error) {
 	for _, bs := range m {
-		if blk, err = bs.Get(cid); err == nil || err != ErrNotFound {
+		if blk, err = bs.Get(ctx, cid); err == nil || err != ErrNotFound {
 			break
 		}
 	}
 	return blk, err
 }

-func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) {
+func (m unionBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) (err error) {
 	for _, bs := range m {
-		if err = bs.View(cid, callback); err == nil || err != ErrNotFound {
+		if err = bs.View(ctx, cid, callback); err == nil || err != ErrNotFound {
 			break
 		}
 	}
 	return err
 }

-func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) {
+func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, err error) {
 	for _, bs := range m {
-		if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound {
+		if size, err = bs.GetSize(ctx, cid); err == nil || err != ErrNotFound {
 			break
 		}
 	}
 	return size, err
 }

-func (m unionBlockstore) Put(block blocks.Block) (err error) {
+func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) {
 	for _, bs := range m {
-		if err = bs.Put(block); err != nil {
+		if err = bs.Put(ctx, block); err != nil {
 			break
 		}
 	}
 	return err
 }

-func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) {
+func (m unionBlockstore) PutMany(ctx context.Context, blks []blocks.Block) (err error) {
 	for _, bs := range m {
-		if err = bs.PutMany(blks); err != nil {
+		if err = bs.PutMany(ctx, blks); err != nil {
 			break
 		}
 	}
 	return err
 }

-func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) {
+func (m unionBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) (err error) {
 	for _, bs := range m {
-		if err = bs.DeleteBlock(cid); err != nil {
+		if err = bs.DeleteBlock(ctx, cid); err != nil {
 			break
 		}
 	}
 	return err
 }

-func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) {
+func (m unionBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) (err error) {
 	for _, bs := range m {
-		if err = bs.DeleteMany(cids); err != nil {
+		if err = bs.DeleteMany(ctx, cids); err != nil {
 			break
 		}
 	}
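Aside (illustration, not part of the commit): the union blockstore above broadcasts writes to every backing store and reads with a fall-through — try each store in order and stop at the first hit or at the first error that is not "not found". The sketch below reproduces only that read path with invented stand-in types (store, unionGet, errNotFound); it is not the lotus API.

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// store is an assumed stand-in for a blockstore, keyed by string CIDs.
type store map[string][]byte

func (s store) get(_ context.Context, k string) ([]byte, error) {
	if v, ok := s[k]; ok {
		return v, nil
	}
	return nil, errNotFound
}

// unionGet mirrors the union read path: first hit wins, a "not found" from one
// store just moves on to the next, and any other error stops the search.
func unionGet(ctx context.Context, stores []store, k string) ([]byte, error) {
	var lastErr error
	for _, s := range stores {
		v, err := s.get(ctx, k)
		if err == nil {
			return v, nil
		}
		if !errors.Is(err, errNotFound) {
			return nil, err
		}
		lastErr = err
	}
	return nil, lastErr
}

func main() {
	a := store{"x": []byte("from-a")}
	b := store{"y": []byte("from-b")}
	v, _ := unionGet(context.Background(), []store{a, b}, "y")
	fmt.Println(string(v)) // from-b
}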
@@ -15,79 +15,81 @@ var (
 )

 func TestUnionBlockstore_Get(t *testing.T) {
+	ctx := context.Background()
 	m1 := NewMemory()
 	m2 := NewMemory()

-	_ = m1.Put(b1)
-	_ = m2.Put(b2)
+	_ = m1.Put(ctx, b1)
+	_ = m2.Put(ctx, b2)

 	u := Union(m1, m2)

-	v1, err := u.Get(b1.Cid())
+	v1, err := u.Get(ctx, b1.Cid())
 	require.NoError(t, err)
 	require.Equal(t, b1.RawData(), v1.RawData())

-	v2, err := u.Get(b2.Cid())
+	v2, err := u.Get(ctx, b2.Cid())
 	require.NoError(t, err)
 	require.Equal(t, b2.RawData(), v2.RawData())
 }

 func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) {
+	ctx := context.Background()
 	m1 := NewMemory()
 	m2 := NewMemory()

 	u := Union(m1, m2)

-	err := u.Put(b0)
+	err := u.Put(ctx, b0)
 	require.NoError(t, err)

 	var has bool

 	// write was broadcasted to all stores.
-	has, _ = m1.Has(b0.Cid())
+	has, _ = m1.Has(ctx, b0.Cid())
 	require.True(t, has)

-	has, _ = m2.Has(b0.Cid())
+	has, _ = m2.Has(ctx, b0.Cid())
 	require.True(t, has)

-	has, _ = u.Has(b0.Cid())
+	has, _ = u.Has(ctx, b0.Cid())
 	require.True(t, has)

 	// put many.
-	err = u.PutMany([]blocks.Block{b1, b2})
+	err = u.PutMany(ctx, []blocks.Block{b1, b2})
 	require.NoError(t, err)

 	// write was broadcasted to all stores.
-	has, _ = m1.Has(b1.Cid())
+	has, _ = m1.Has(ctx, b1.Cid())
 	require.True(t, has)

-	has, _ = m1.Has(b2.Cid())
+	has, _ = m1.Has(ctx, b2.Cid())
 	require.True(t, has)

-	has, _ = m2.Has(b1.Cid())
+	has, _ = m2.Has(ctx, b1.Cid())
 	require.True(t, has)

-	has, _ = m2.Has(b2.Cid())
+	has, _ = m2.Has(ctx, b2.Cid())
 	require.True(t, has)

 	// also in the union store.
-	has, _ = u.Has(b1.Cid())
+	has, _ = u.Has(ctx, b1.Cid())
 	require.True(t, has)

-	has, _ = u.Has(b2.Cid())
+	has, _ = u.Has(ctx, b2.Cid())
 	require.True(t, has)

 	// deleted from all stores.
-	err = u.DeleteBlock(b1.Cid())
+	err = u.DeleteBlock(ctx, b1.Cid())
 	require.NoError(t, err)

-	has, _ = u.Has(b1.Cid())
+	has, _ = u.Has(ctx, b1.Cid())
 	require.False(t, has)

-	has, _ = m1.Has(b1.Cid())
+	has, _ = m1.Has(ctx, b1.Cid())
 	require.False(t, has)

-	has, _ = m2.Has(b1.Cid())
+	has, _ = m2.Has(ctx, b1.Cid())
 	require.False(t, has)

 	// check that AllKeysChan returns b0 and b2, twice (once per backing store)
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -47,6 +47,8 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)

 var UpgradeChocolateHeight = abi.ChainEpoch(-17)

+var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
+
 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 	0: DrandMainnet,
 }
@@ -41,6 +41,7 @@ const UpgradeNorwegianHeight = -14
 const UpgradeTurboHeight = -15
 const UpgradeHyperdriveHeight = -16
 const UpgradeChocolateHeight = 6360
+const UpgradeSnapDealsHeight = 99999999

 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
@@ -54,6 +54,8 @@ const UpgradeHyperdriveHeight = 420

 const UpgradeChocolateHeight = 312746

+const UpgradeSnapDealsHeight = 99999999
+
 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
 	policy.SetSupportedProofTypes(
@@ -47,6 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15)

 var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
 var UpgradeChocolateHeight = abi.ChainEpoch(-17)
+var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)

 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 	0: DrandMainnet,
@@ -62,18 +62,20 @@ const UpgradeNorwegianHeight = 665280
 const UpgradeTurboHeight = 712320

 // 2021-06-30T22:00:00Z
-var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
+const UpgradeHyperdriveHeight = 892800

 // 2021-10-26T13:30:00Z
-var UpgradeChocolateHeight = abi.ChainEpoch(1231620)
+const UpgradeChocolateHeight = 1231620
+
+var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999)

 func init() {
 	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
 		SetAddressNetwork(address.Mainnet)
 	}

-	if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" {
-		UpgradeChocolateHeight = math.MaxInt64
+	if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" {
+		UpgradeSnapDealsHeight = math.MaxInt64
 	}

 	Devnet = false
@@ -34,7 +34,7 @@ const NewestNetworkVersion = network.Version{{.latestNetworkVersion}}

 /* inline-gen start */

-const NewestNetworkVersion = network.Version14
+const NewestNetworkVersion = network.Version15

 /* inline-gen end */

@@ -99,6 +99,7 @@ var (
 	UpgradeTurboHeight      abi.ChainEpoch = -14
 	UpgradeHyperdriveHeight abi.ChainEpoch = -15
 	UpgradeChocolateHeight  abi.ChainEpoch = -16
+	UpgradeSnapDealsHeight  abi.ChainEpoch = -17

 	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 		0: DrandMainnet,
@@ -37,7 +37,7 @@ func BuildTypeString() string {
 }

 // BuildVersion is the local build version
-const BuildVersion = "1.13.2-dev"
+const BuildVersion = "1.13.3-dev"

 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -23,6 +23,8 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )

 func init() {
@@ -50,6 +52,10 @@ func init() {
 	builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }

 var Methods = builtin4.MethodsAccount
@@ -75,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.AccountActorCodeID:
 		return load6(store, act.Head)

+	case builtin7.AccountActorCodeID:
+		return load7(store, act.Head)
+
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -100,6 +109,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
 	case actors.Version6:
 		return make6(store, addr)

+	case actors.Version7:
+		return make7(store, addr)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -125,6 +137,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.AccountActorCodeID, nil

+	case actors.Version7:
+		return builtin7.AccountActorCodeID, nil
+
 	}

 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
chain/actors/builtin/account/v7.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+package account
+
+import (
+	"github.com/filecoin-project/go-address"
+	"github.com/ipfs/go-cid"
+
+	"github.com/filecoin-project/lotus/chain/actors/adt"
+
+	account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account"
+)
+
+var _ State = (*state7)(nil)
+
+func load7(store adt.Store, root cid.Cid) (State, error) {
+	out := state7{store: store}
+	err := store.Get(store.Context(), root, &out)
+	if err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+func make7(store adt.Store, addr address.Address) (State, error) {
+	out := state7{store: store}
+	out.State = account7.State{Address: addr}
+	return &out, nil
+}
+
+type state7 struct {
+	account7.State
+	store adt.Store
+}
+
+func (s *state7) PubkeyAddress() (address.Address, error) {
+	return s.Address, nil
+}
+
+func (s *state7) GetState() interface{} {
+	return &s.State
+}
@@ -23,46 +23,49 @@ import (
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
 	smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing"

+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
+	smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing"
+
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/cbor"

 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/types"

-	miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
-	proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof"
+	miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
+	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
 )

-var SystemActorAddr = builtin6.SystemActorAddr
-var BurntFundsActorAddr = builtin6.BurntFundsActorAddr
-var CronActorAddr = builtin6.CronActorAddr
+var SystemActorAddr = builtin7.SystemActorAddr
+var BurntFundsActorAddr = builtin7.BurntFundsActorAddr
+var CronActorAddr = builtin7.CronActorAddr
 var SaftAddress = makeAddress("t0122")
 var ReserveAddress = makeAddress("t090")
 var RootVerifierAddress = makeAddress("t080")

 var (
-	ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch
+	ExpectedLeadersPerEpoch = builtin7.ExpectedLeadersPerEpoch
 )

 const (
-	EpochDurationSeconds = builtin6.EpochDurationSeconds
-	EpochsInDay          = builtin6.EpochsInDay
-	SecondsInDay         = builtin6.SecondsInDay
+	EpochDurationSeconds = builtin7.EpochDurationSeconds
+	EpochsInDay          = builtin7.EpochsInDay
+	SecondsInDay         = builtin7.SecondsInDay
 )

 const (
-	MethodSend        = builtin6.MethodSend
-	MethodConstructor = builtin6.MethodConstructor
+	MethodSend        = builtin7.MethodSend
+	MethodConstructor = builtin7.MethodConstructor
 )

 // These are all just type aliases across actor versions. In the future, that might change
 // and we might need to do something fancier.
-type SectorInfo = proof6.SectorInfo
-type PoStProof = proof6.PoStProof
+type SectorInfo = proof7.SectorInfo
+type PoStProof = proof7.PoStProof
 type FilterEstimate = smoothing0.FilterEstimate

 func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
-	return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
+	return miner7.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
 }

 func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
@@ -101,6 +104,12 @@ func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate {

 }

+func FromV7FilterEstimate(v7 smoothing7.FilterEstimate) FilterEstimate {
+
+	return (FilterEstimate)(v7)
+
+}
+
 type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)

 var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
@@ -138,6 +147,9 @@ func ActorNameByCode(c cid.Cid) string {
 	case builtin6.IsBuiltinActor(c):
 		return builtin6.ActorNameByCode(c)

+	case builtin7.IsBuiltinActor(c):
+		return builtin7.ActorNameByCode(c)
+
 	default:
 		return "<unknown>"
 	}
@@ -169,6 +181,10 @@ func IsBuiltinActor(c cid.Cid) bool {
 		return true
 	}

+	if builtin7.IsBuiltinActor(c) {
+		return true
+	}
+
 	return false
 }

@@ -198,6 +214,10 @@ func IsAccountActor(c cid.Cid) bool {
 		return true
 	}

+	if c == builtin7.AccountActorCodeID {
+		return true
+	}
+
 	return false
 }

@@ -227,6 +247,10 @@ func IsStorageMinerActor(c cid.Cid) bool {
 		return true
 	}

+	if c == builtin7.StorageMinerActorCodeID {
+		return true
+	}
+
 	return false
 }

@@ -256,6 +280,10 @@ func IsMultisigActor(c cid.Cid) bool {
 		return true
 	}

+	if c == builtin7.MultisigActorCodeID {
+		return true
+	}
+
 	return false
 }

@@ -285,6 +313,10 @@ func IsPaymentChannelActor(c cid.Cid) bool {
 		return true
 	}

+	if c == builtin7.PaymentChannelActorCodeID {
+		return true
+	}
+
 	return false
 }

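Aside (illustration, not part of the commit): the account, cron, init and market changes in this commit all follow the same shape — a new per-version wrapper type (state7) that embeds the specs-actors v7 state, plus one extra case in each Load/MakeState/GetActorCodeID switch. The sketch below shows only that dispatch pattern with invented stand-in types (State, state6, state7, load); the real code switches on the actor code CID and returns the versioned adapter.

package main

import (
	"errors"
	"fmt"
)

// State is an assumed, trimmed-down version of the common interface every
// per-version wrapper satisfies.
type State interface {
	PubkeyAddress() (string, error)
}

type state6 struct{ addr string }
type state7 struct{ addr string }

func (s *state6) PubkeyAddress() (string, error) { return s.addr, nil }
func (s *state7) PubkeyAddress() (string, error) { return s.addr, nil }

// load dispatches on a version tag the way Load dispatches on the actor code
// CID: supporting a new actors version means adding exactly one case.
func load(version int, addr string) (State, error) {
	switch version {
	case 6:
		return &state6{addr: addr}, nil
	case 7:
		return &state7{addr: addr}, nil
	}
	return nil, errors.New("unknown actor version")
}

func main() {
	st, err := load(7, "f1example")
	if err != nil {
		panic(err)
	}
	a, _ := st.PubkeyAddress()
	fmt.Println(a)
}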
@@ -17,6 +17,8 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )

 func MakeState(store adt.Store, av actors.Version) (State, error) {
@@ -40,6 +42,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)

+	case actors.Version7:
+		return make7(store)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -65,14 +70,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.CronActorCodeID, nil

+	case actors.Version7:
+		return builtin7.CronActorCodeID, nil
+
 	}

 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
 }

 var (
-	Address = builtin6.CronActorAddr
-	Methods = builtin6.MethodsCron
+	Address = builtin7.CronActorAddr
+	Methods = builtin7.MethodsCron
 )

 type State interface {
chain/actors/builtin/cron/v7.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package cron
+
+import (
+	"github.com/ipfs/go-cid"
+
+	"github.com/filecoin-project/lotus/chain/actors/adt"
+
+	cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron"
+)
+
+var _ State = (*state7)(nil)
+
+func load7(store adt.Store, root cid.Cid) (State, error) {
+	out := state7{store: store}
+	err := store.Get(store.Context(), root, &out)
+	if err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+func make7(store adt.Store) (State, error) {
+	out := state7{store: store}
+	out.State = *cron7.ConstructState(cron7.BuiltInEntries())
+	return &out, nil
+}
+
+type state7 struct {
+	cron7.State
+	store adt.Store
+}
+
+func (s *state7) GetState() interface{} {
+	return &s.State
+}
@@ -25,6 +25,8 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"

 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )

 func init() {
@@ -52,11 +54,15 @@ func init() {
 	builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }

 var (
-	Address = builtin6.InitActorAddr
-	Methods = builtin6.MethodsInit
+	Address = builtin7.InitActorAddr
+	Methods = builtin7.MethodsInit
 )

 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -80,6 +86,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.InitActorCodeID:
 		return load6(store, act.Head)

+	case builtin7.InitActorCodeID:
+		return load7(store, act.Head)
+
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -105,6 +114,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e
 	case actors.Version6:
 		return make6(store, networkName)

+	case actors.Version7:
+		return make7(store, networkName)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -130,6 +142,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.InitActorCodeID, nil

+	case actors.Version7:
+		return builtin7.InitActorCodeID, nil
+
 	}

 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
chain/actors/builtin/init/v7.go (new file, 114 lines)
@@ -0,0 +1,114 @@
+package init
+
+import (
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/ipfs/go-cid"
+	cbg "github.com/whyrusleeping/cbor-gen"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/chain/actors/adt"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
+
+	init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
+	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
+)
+
+var _ State = (*state7)(nil)
+
+func load7(store adt.Store, root cid.Cid) (State, error) {
+	out := state7{store: store}
+	err := store.Get(store.Context(), root, &out)
+	if err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+func make7(store adt.Store, networkName string) (State, error) {
+	out := state7{store: store}
+
+	s, err := init7.ConstructState(store, networkName)
+	if err != nil {
+		return nil, err
+	}
+
+	out.State = *s
+
+	return &out, nil
+}
+
+type state7 struct {
+	init7.State
+	store adt.Store
+}
+
+func (s *state7) ResolveAddress(address address.Address) (address.Address, bool, error) {
+	return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) {
+	return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+	addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
+	if err != nil {
+		return err
+	}
+	var actorID cbg.CborInt
+	return addrs.ForEach(&actorID, func(key string) error {
+		addr, err := address.NewFromBytes([]byte(key))
+		if err != nil {
+			return err
+		}
+		return cb(abi.ActorID(actorID), addr)
+	})
+}
+
+func (s *state7) NetworkName() (dtypes.NetworkName, error) {
+	return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state7) SetNetworkName(name string) error {
+	s.State.NetworkName = name
+	return nil
+}
+
+func (s *state7) SetNextID(id abi.ActorID) error {
+	s.State.NextID = id
+	return nil
+}
+
+func (s *state7) Remove(addrs ...address.Address) (err error) {
+	m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
+	if err != nil {
+		return err
+	}
+	for _, addr := range addrs {
+		if err = m.Delete(abi.AddrKey(addr)); err != nil {
+			return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+		}
+	}
+	amr, err := m.Root()
+	if err != nil {
+		return xerrors.Errorf("failed to get address map root: %w", err)
+	}
+	s.State.AddressMap = amr
+	return nil
+}
+
+func (s *state7) SetAddressMap(mcid cid.Cid) error {
+	s.State.AddressMap = mcid
+	return nil
+}
+
+func (s *state7) AddressMap() (adt.Map, error) {
+	return adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
+}
+
+func (s *state7) GetState() interface{} {
+	return &s.State
+}
@@ -25,6 +25,8 @@ import (

 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
+
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -56,11 +58,15 @@ func init() {
 	builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }

 var (
-	Address = builtin6.StorageMarketActorAddr
-	Methods = builtin6.MethodsMarket
+	Address = builtin7.StorageMarketActorAddr
+	Methods = builtin7.MethodsMarket
 )

 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -84,6 +90,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.StorageMarketActorCodeID:
 		return load6(store, act.Head)

+	case builtin7.StorageMarketActorCodeID:
+		return load7(store, act.Head)
+
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -109,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)

+	case actors.Version7:
+		return make7(store)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -134,6 +146,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.StorageMarketActorCodeID, nil

+	case actors.Version7:
+		return builtin7.StorageMarketActorCodeID, nil
+
 	}

 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@@ -211,6 +226,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
 	case actors.Version6:
 		return decodePublishStorageDealsReturn6(b)

+	case actors.Version7:
+		return decodePublishStorageDealsReturn7(b)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
252	chain/actors/builtin/market/v7.go	Normal file
@ -0,0 +1,252 @@
package market

import (
	"bytes"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"

	market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store) (State, error) {
	out := state7{store: store}

	s, err := market7.ConstructState(store)
	if err != nil {
		return nil, err
	}

	out.State = *s

	return &out, nil
}

type state7 struct {
	market7.State
	store adt.Store
}

func (s *state7) TotalLocked() (abi.TokenAmount, error) {
	fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
	fml = types.BigAdd(fml, s.TotalClientStorageFee)
	return fml, nil
}

func (s *state7) BalancesChanged(otherState State) (bool, error) {
	otherState7, ok := otherState.(*state7)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil
}

func (s *state7) StatesChanged(otherState State) (bool, error) {
	otherState7, ok := otherState.(*state7)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.States.Equals(otherState7.State.States), nil
}

func (s *state7) States() (DealStates, error) {
	stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth)
	if err != nil {
		return nil, err
	}
	return &dealStates7{stateArray}, nil
}

func (s *state7) ProposalsChanged(otherState State) (bool, error) {
	otherState7, ok := otherState.(*state7)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.Proposals.Equals(otherState7.State.Proposals), nil
}

func (s *state7) Proposals() (DealProposals, error) {
	proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth)
	if err != nil {
		return nil, err
	}
	return &dealProposals7{proposalArray}, nil
}

func (s *state7) EscrowTable() (BalanceTable, error) {
	bt, err := adt7.AsBalanceTable(s.store, s.State.EscrowTable)
	if err != nil {
		return nil, err
	}
	return &balanceTable7{bt}, nil
}

func (s *state7) LockedTable() (BalanceTable, error) {
	bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable)
	if err != nil {
		return nil, err
	}
	return &balanceTable7{bt}, nil
}

func (s *state7) VerifyDealsForActivation(
	minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
	w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
	return w, vw, err
}

func (s *state7) NextID() (abi.DealID, error) {
	return s.State.NextID, nil
}

type balanceTable7 struct {
	*adt7.BalanceTable
}

func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
	asMap := (*adt7.Map)(bt.BalanceTable)
	var ta abi.TokenAmount
	return asMap.ForEach(&ta, func(key string) error {
		a, err := address.NewFromBytes([]byte(key))
		if err != nil {
			return err
		}
		return cb(a, ta)
	})
}

type dealStates7 struct {
	adt.Array
}

func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) {
	var deal7 market7.DealState
	found, err := s.Array.Get(uint64(dealID), &deal7)
	if err != nil {
		return nil, false, err
	}
	if !found {
		return nil, false, nil
	}
	deal := fromV7DealState(deal7)
	return &deal, true, nil
}

func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
	var ds7 market7.DealState
	return s.Array.ForEach(&ds7, func(idx int64) error {
		return cb(abi.DealID(idx), fromV7DealState(ds7))
	})
}

func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) {
	var ds7 market7.DealState
	if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return nil, err
	}
	ds := fromV7DealState(ds7)
	return &ds, nil
}

func (s *dealStates7) array() adt.Array {
	return s.Array
}

func fromV7DealState(v7 market7.DealState) DealState {
	return (DealState)(v7)
}

type dealProposals7 struct {
	adt.Array
}

func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) {
	var proposal7 market7.DealProposal
	found, err := s.Array.Get(uint64(dealID), &proposal7)
	if err != nil {
		return nil, false, err
	}
	if !found {
		return nil, false, nil
	}
	proposal := fromV7DealProposal(proposal7)
	return &proposal, true, nil
}

func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
	var dp7 market7.DealProposal
	return s.Array.ForEach(&dp7, func(idx int64) error {
		return cb(abi.DealID(idx), fromV7DealProposal(dp7))
	})
}

func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) {
	var dp7 market7.DealProposal
	if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return nil, err
	}
	dp := fromV7DealProposal(dp7)
	return &dp, nil
}

func (s *dealProposals7) array() adt.Array {
	return s.Array
}

func fromV7DealProposal(v7 market7.DealProposal) DealProposal {
	return (DealProposal)(v7)
}

func (s *state7) GetState() interface{} {
	return &s.State
}

var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil)

func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) {
	var retval market7.PublishStorageDealsReturn
	if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
		return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
	}

	return &publishStorageDealsReturn7{retval}, nil
}

type publishStorageDealsReturn7 struct {
	market7.PublishStorageDealsReturn
}

func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) {
	return r.ValidDeals.IsSet(index)
}

func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) {
	return r.IDs, nil
}
@ -177,6 +177,7 @@ type SectorOnChainInfo struct {
 	InitialPledge         abi.TokenAmount
 	ExpectedDayReward     abi.TokenAmount
 	ExpectedStoragePledge abi.TokenAmount
+	SectorKeyCID *cid.Cid
 }
 
 type SectorPreCommitInfo = miner0.SectorPreCommitInfo
@ -35,6 +35,8 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
 
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )
 
 func init() {
@ -63,9 +65,13 @@ func init() {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
-var Methods = builtin6.MethodsMiner
+var Methods = builtin7.MethodsMiner
 
 // Unchanged between v0, v2, v3, v4, and v5 actors
 var WPoStProvingPeriod = miner0.WPoStProvingPeriod
@ -102,6 +108,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.StorageMinerActorCodeID:
 		return load6(store, act.Head)
 
+	case builtin7.StorageMinerActorCodeID:
+		return load7(store, act.Head)
+
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@ -127,6 +136,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)
 
+	case actors.Version7:
+		return make7(store)
+
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@ -152,6 +164,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.StorageMinerActorCodeID, nil
 
+	case actors.Version7:
+		return builtin7.StorageMinerActorCodeID, nil
+
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -251,6 +266,7 @@ type SectorOnChainInfo struct {
 	InitialPledge         abi.TokenAmount
 	ExpectedDayReward     abi.TokenAmount
 	ExpectedStoragePledge abi.TokenAmount
+	SectorKeyCID *cid.Cid
 }
 
 type SectorPreCommitInfo = miner0.SectorPreCommitInfo
@ -138,11 +138,22 @@ func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpirati
 		return nil, err
 	}
 	// NOTE: this can be optimized significantly.
-	// 1. If the sector is non-faulty, it will either expire on-time (can be
+	{{if (ge .v 7) -}}
+	// 1. If the sector is non-faulty, it will expire on-time (can be
+	// learned from the sector info).
+	{{- else -}}
+	// 1. If the sector is non-faulty, it will either expire on-time (can be
 	// learned from the sector info), or in the next quantized expiration
 	// epoch (i.e., the first element in the partition's expiration queue.
+	{{- end}}
+	{{if (ge .v 6) -}}
+	// 2. If it's faulty, it will expire early within the first 42 entries
+	// of the expiration queue.
+	{{- else -}}
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+	{{- end}}
 
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error {
@ -554,8 +565,7 @@ func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
-	{{if (ge .v 2)}}
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v{{.v}}.SectorNumber,
 		SealProof:    v{{.v}}.SealProof,
 		SealedCID:    v{{.v}}.SealedCID,
@ -567,10 +577,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO
 		InitialPledge:         v{{.v}}.InitialPledge,
 		ExpectedDayReward:     v{{.v}}.ExpectedDayReward,
 		ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge,
+		{{if (ge .v 7)}}
+		SectorKeyCID: v{{.v}}.SectorKeyCID,
+		{{end}}
 	}
-	{{else}}
-	return (SectorOnChainInfo)(v0)
-	{{end}}
+	return info
 }
 
 func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.
 
 	return 0, xerrors.Errorf("unsupported network version")
 }
+
+// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating
+// new miner actors and new sectors
+func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) {
+	switch ssize {
+	case 2 << 10:
+		return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil
+	case 8 << 20:
+		return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil
+	case 512 << 20:
+		return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil
+	case 32 << 30:
+		return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil
+	case 64 << 30:
+		return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil
+	default:
+		return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+	}
+}
@ -140,6 +140,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// epoch (i.e., the first element in the partition's expiration queue.
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
@ -505,9 +506,20 @@ func (p *partition0) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
-	return (SectorOnChainInfo)(v0)
+	info := SectorOnChainInfo{
+		SectorNumber:          v0.SectorNumber,
+		SealProof:             v0.SealProof,
+		SealedCID:             v0.SealedCID,
+		DealIDs:               v0.DealIDs,
+		Activation:            v0.Activation,
+		Expiration:            v0.Expiration,
+		DealWeight:            v0.DealWeight,
+		VerifiedDealWeight:    v0.VerifiedDealWeight,
+		InitialPledge:         v0.InitialPledge,
+		ExpectedDayReward:     v0.ExpectedDayReward,
+		ExpectedStoragePledge: v0.ExpectedStoragePledge,
+	}
+	return info
 }
 
 func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -138,6 +138,7 @@ func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// epoch (i.e., the first element in the partition's expiration queue.
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error {
@ -535,8 +536,7 @@ func (p *partition2) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v2.SectorNumber,
 		SealProof:    v2.SealProof,
 		SealedCID:    v2.SealedCID,
@ -549,7 +549,7 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedDayReward:     v2.ExpectedDayReward,
 		ExpectedStoragePledge: v2.ExpectedStoragePledge,
 	}
+	return info
 }
 
 func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -140,6 +140,7 @@ func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// epoch (i.e., the first element in the partition's expiration queue.
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition3) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v3.SectorNumber,
 		SealProof:    v3.SealProof,
 		SealedCID:    v3.SealedCID,
@ -550,7 +550,7 @@ func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedDayReward:     v3.ExpectedDayReward,
 		ExpectedStoragePledge: v3.ExpectedStoragePledge,
 	}
+	return info
 }
 
 func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -140,6 +140,7 @@ func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// epoch (i.e., the first element in the partition's expiration queue.
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition4) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v4.SectorNumber,
 		SealProof:    v4.SealProof,
 		SealedCID:    v4.SealedCID,
@ -550,7 +550,7 @@ func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedDayReward:     v4.ExpectedDayReward,
 		ExpectedStoragePledge: v4.ExpectedStoragePledge,
 	}
+	return info
 }
 
 func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -140,6 +140,7 @@ func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// epoch (i.e., the first element in the partition's expiration queue.
 	// 2. If it's faulty, it will expire early within the first 14 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition5) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v5.SectorNumber,
 		SealProof:    v5.SealProof,
 		SealedCID:    v5.SealedCID,
@ -550,7 +550,7 @@ func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedDayReward:     v5.ExpectedDayReward,
 		ExpectedStoragePledge: v5.ExpectedStoragePledge,
 	}
+	return info
 }
 
 func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
@ -138,8 +138,9 @@ func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
 	// 1. If the sector is non-faulty, it will either expire on-time (can be
 	// learned from the sector info), or in the next quantized expiration
 	// epoch (i.e., the first element in the partition's expiration queue.
-	// 2. If it's faulty, it will expire early within the first 14 entries
+	// 2. If it's faulty, it will expire early within the first 42 entries
 	// of the expiration queue.
+
 	stopErr := errors.New("stop")
 	out := SectorExpiration{}
 	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition6) UnprovenSectors() (bitfield.BitField, error) {
 }
 
 func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo {
-	return SectorOnChainInfo{
+	info := SectorOnChainInfo{
 		SectorNumber: v6.SectorNumber,
 		SealProof:    v6.SealProof,
 		SealedCID:    v6.SealedCID,
@ -550,7 +550,7 @@ func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedDayReward:     v6.ExpectedDayReward,
 		ExpectedStoragePledge: v6.ExpectedStoragePledge,
 	}
+	return info
 }
 
 func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
571	chain/actors/builtin/miner/v7.go	Normal file
@ -0,0 +1,571 @@
package miner

import (
	"bytes"
	"errors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	rle "github.com/filecoin-project/go-bitfield/rle"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p-core/peer"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"

	miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store) (State, error) {
	out := state7{store: store}
	out.State = miner7.State{}
	return &out, nil
}

type state7 struct {
	miner7.State
	store adt.Store
}

type deadline7 struct {
	miner7.Deadline
	store adt.Store
}

type partition7 struct {
	miner7.Partition
	store adt.Store
}

func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = xerrors.Errorf("failed to get available balance: %w", r)
			available = abi.NewTokenAmount(0)
		}
	}()
	// this panics if the miner doesnt have enough funds to cover their locked pledge
	available, err = s.GetAvailableBalance(bal)
	return available, err
}

func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
	return s.CheckVestedFunds(s.store, epoch)
}

func (s *state7) LockedFunds() (LockedFunds, error) {
	return LockedFunds{
		VestingFunds:             s.State.LockedFunds,
		InitialPledgeRequirement: s.State.InitialPledge,
		PreCommitDeposits:        s.State.PreCommitDeposits,
	}, nil
}

func (s *state7) FeeDebt() (abi.TokenAmount, error) {
	return s.State.FeeDebt, nil
}

func (s *state7) InitialPledge() (abi.TokenAmount, error) {
	return s.State.InitialPledge, nil
}

func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) {
	return s.State.PreCommitDeposits, nil
}

func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
	info, ok, err := s.State.GetSector(s.store, num)
	if !ok || err != nil {
		return nil, err
	}

	ret := fromV7SectorOnChainInfo(*info)
	return &ret, nil
}

func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
	dlIdx, partIdx, err := s.State.FindSector(s.store, num)
	if err != nil {
		return nil, err
	}
	return &SectorLocation{
		Deadline:  dlIdx,
		Partition: partIdx,
	}, nil
}

func (s *state7) NumLiveSectors() (uint64, error) {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return 0, err
	}
	var total uint64
	if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error {
		total += dl.LiveSectors
		return nil
	}); err != nil {
		return 0, err
	}
	return total, nil
}

// GetSectorExpiration returns the effective expiration of the given sector.
//
// If the sector does not expire early, the Early expiration field is 0.
func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return nil, err
	}
	// NOTE: this can be optimized significantly.
	// 1. If the sector is non-faulty, it will expire on-time (can be
	// learned from the sector info).
	// 2. If it's faulty, it will expire early within the first 42 entries
	// of the expiration queue.

	stopErr := errors.New("stop")
	out := SectorExpiration{}
	err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error {
		partitions, err := dl.PartitionsArray(s.store)
		if err != nil {
			return err
		}
		quant := s.State.QuantSpecForDeadline(dlIdx)
		var part miner7.Partition
		return partitions.ForEach(&part, func(partIdx int64) error {
			if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
				return err
			} else if !found {
				return nil
			}
			if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
				return err
			} else if found {
				// already terminated
				return stopErr
			}

			q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth)
			if err != nil {
				return err
			}
			var exp miner7.ExpirationSet
			return q.ForEach(&exp, func(epoch int64) error {
				if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
					return err
				} else if early {
					out.Early = abi.ChainEpoch(epoch)
					return nil
				}
				if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
					return err
				} else if onTime {
					out.OnTime = abi.ChainEpoch(epoch)
					return stopErr
				}
				return nil
			})
		})
	})
	if err == stopErr {
		err = nil
	}
	if err != nil {
		return nil, err
	}
	if out.Early == 0 && out.OnTime == 0 {
		return nil, xerrors.Errorf("failed to find sector %d", num)
	}
	return &out, nil
}

func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
	info, ok, err := s.State.GetPrecommittedSector(s.store, num)
	if !ok || err != nil {
		return nil, err
	}

	ret := fromV7SectorPreCommitOnChainInfo(*info)

	return &ret, nil
}

func (s *state7) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
	precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth)
	if err != nil {
		return err
	}

	var info miner7.SectorPreCommitOnChainInfo
	if err := precommitted.ForEach(&info, func(_ string) error {
		return cb(fromV7SectorPreCommitOnChainInfo(info))
	}); err != nil {
		return err
	}

	return nil
}

func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
	sectors, err := miner7.LoadSectors(s.store, s.State.Sectors)
	if err != nil {
		return nil, err
	}

	// If no sector numbers are specified, load all.
	if snos == nil {
		infos := make([]*SectorOnChainInfo, 0, sectors.Length())
		var info7 miner7.SectorOnChainInfo
		if err := sectors.ForEach(&info7, func(_ int64) error {
			info := fromV7SectorOnChainInfo(info7)
			infos = append(infos, &info)
			return nil
		}); err != nil {
			return nil, err
		}
		return infos, nil
	}

	// Otherwise, load selected.
	infos7, err := sectors.Load(*snos)
	if err != nil {
		return nil, err
	}
	infos := make([]*SectorOnChainInfo, len(infos7))
	for i, info7 := range infos7 {
		info := fromV7SectorOnChainInfo(*info7)
		infos[i] = &info
	}
	return infos, nil
}

func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
	var allocatedSectors bitfield.BitField
	err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
	return allocatedSectors, err
}

func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) {
	allocatedSectors, err := s.loadAllocatedSectorNumbers()
	if err != nil {
		return false, err
	}

	return allocatedSectors.IsSet(uint64(num))
}

func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) {
	return s.State.ProvingPeriodStart, nil
}

func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
	allocatedSectors, err := s.loadAllocatedSectorNumbers()
	if err != nil {
		return nil, err
	}

	allocatedRuns, err := allocatedSectors.RunIterator()
	if err != nil {
		return nil, err
	}

	unallocatedRuns, err := rle.Subtract(
		&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
		allocatedRuns,
	)
	if err != nil {
		return nil, err
	}

	iter, err := rle.BitsFromRuns(unallocatedRuns)
	if err != nil {
		return nil, err
	}

	sectors := make([]abi.SectorNumber, 0, count)
	for iter.HasNext() && len(sectors) < count {
		nextNo, err := iter.Next()
		if err != nil {
			return nil, err
		}
		sectors = append(sectors, abi.SectorNumber(nextNo))
	}

	return sectors, nil
}

func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) {
	var allocatedSectors bitfield.BitField
	if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
		return nil, err
	}

	return &allocatedSectors, nil
}

func (s *state7) LoadDeadline(idx uint64) (Deadline, error) {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return nil, err
	}
	dl, err := dls.LoadDeadline(s.store, idx)
	if err != nil {
		return nil, err
	}
	return &deadline7{*dl, s.store}, nil
}

func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return err
	}
	return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error {
		return cb(i, &deadline7{*dl, s.store})
	})
}

func (s *state7) NumDeadlines() (uint64, error) {
	return miner7.WPoStPeriodDeadlines, nil
}

func (s *state7) DeadlinesChanged(other State) (bool, error) {
	other7, ok := other.(*state7)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}

	return !s.State.Deadlines.Equals(other7.Deadlines), nil
}

func (s *state7) MinerInfoChanged(other State) (bool, error) {
	other0, ok := other.(*state7)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.Info.Equals(other0.State.Info), nil
}

func (s *state7) Info() (MinerInfo, error) {
	info, err := s.State.GetInfo(s.store)
	if err != nil {
		return MinerInfo{}, err
	}

	var pid *peer.ID
	if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
		pid = &peerID
	}

	mi := MinerInfo{
		Owner:            info.Owner,
		Worker:           info.Worker,
		ControlAddresses: info.ControlAddresses,

		NewWorker:         address.Undef,
		WorkerChangeEpoch: -1,

		PeerId:                     pid,
		Multiaddrs:                 info.Multiaddrs,
		WindowPoStProofType:        info.WindowPoStProofType,
		SectorSize:                 info.SectorSize,
		WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
		ConsensusFaultElapsed:      info.ConsensusFaultElapsed,
	}

	if info.PendingWorkerKey != nil {
		mi.NewWorker = info.PendingWorkerKey.NewWorker
		mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
	}

	return mi, nil
}

func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
	return s.State.RecordedDeadlineInfo(epoch), nil
}

func (s *state7) DeadlineCronActive() (bool, error) {
	return s.State.DeadlineCronActive, nil
}

func (s *state7) sectors() (adt.Array, error) {
	return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth)
}

func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
	var si miner7.SectorOnChainInfo
	err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return SectorOnChainInfo{}, err
	}

	return fromV7SectorOnChainInfo(si), nil
}

func (s *state7) precommits() (adt.Map, error) {
	return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth)
}

func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
	var sp miner7.SectorPreCommitOnChainInfo
	err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return SectorPreCommitOnChainInfo{}, err
	}

	return fromV7SectorPreCommitOnChainInfo(sp), nil
}

func (s *state7) EraseAllUnproven() error {

	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return err
	}

	err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error {
		ps, err := dl.PartitionsArray(s.store)
		if err != nil {
			return err
		}

		var part miner7.Partition
		err = ps.ForEach(&part, func(pindx int64) error {
			_ = part.ActivateUnproven()
			err = ps.Set(uint64(pindx), &part)
			return nil
		})

		if err != nil {
			return err
		}

		dl.Partitions, err = ps.Root()
		if err != nil {
			return err
		}

		return dls.UpdateDeadline(s.store, dindx, dl)
	})
	if err != nil {
		return err
	}

	return s.State.SaveDeadlines(s.store, dls)

}

func (d *deadline7) LoadPartition(idx uint64) (Partition, error) {
	p, err := d.Deadline.LoadPartition(d.store, idx)
	if err != nil {
		return nil, err
	}
	return &partition7{*p, d.store}, nil
}

func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error {
	ps, err := d.Deadline.PartitionsArray(d.store)
	if err != nil {
		return err
	}
	var part miner7.Partition
	return ps.ForEach(&part, func(i int64) error {
		return cb(uint64(i), &partition7{part, d.store})
	})
}

func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) {
	other7, ok := other.(*deadline7)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}

	return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil
}

func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) {
	return d.Deadline.PartitionsPoSted, nil
}

func (d *deadline7) DisputableProofCount() (uint64, error) {

	ops, err := d.OptimisticProofsSnapshotArray(d.store)
	if err != nil {
		return 0, err
	}

	return ops.Length(), nil

}

func (p *partition7) AllSectors() (bitfield.BitField, error) {
	return p.Partition.Sectors, nil
}

func (p *partition7) FaultySectors() (bitfield.BitField, error) {
	return p.Partition.Faults, nil
}

func (p *partition7) RecoveringSectors() (bitfield.BitField, error) {
	return p.Partition.Recoveries, nil
}

func (p *partition7) UnprovenSectors() (bitfield.BitField, error) {
	return p.Partition.Unproven, nil
}

func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo {
	info := SectorOnChainInfo{
		SectorNumber:          v7.SectorNumber,
		SealProof:             v7.SealProof,
		SealedCID:             v7.SealedCID,
		DealIDs:               v7.DealIDs,
		Activation:            v7.Activation,
		Expiration:            v7.Expiration,
		DealWeight:            v7.DealWeight,
		VerifiedDealWeight:    v7.VerifiedDealWeight,
		InitialPledge:         v7.InitialPledge,
		ExpectedDayReward:     v7.ExpectedDayReward,
		ExpectedStoragePledge: v7.ExpectedStoragePledge,

		SectorKeyCID: v7.SectorKeyCID,
	}
	return info
}

func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {

	return SectorPreCommitOnChainInfo{
		Info:               (SectorPreCommitInfo)(v7.Info),
		PreCommitDeposit:   v7.PreCommitDeposit,
		PreCommitEpoch:     v7.PreCommitEpoch,
		DealWeight:         v7.DealWeight,
		VerifiedDealWeight: v7.VerifiedDealWeight,
	}

}

func (s *state7) GetState() interface{} {
	return &s.State
}
71	chain/actors/builtin/multisig/message7.go	Normal file
@ -0,0 +1,71 @@
package multisig

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
	init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
	multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"

	"github.com/filecoin-project/lotus/chain/actors"
	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

type message7 struct{ message0 }

func (m message7) Create(
	signers []address.Address, threshold uint64,
	unlockStart, unlockDuration abi.ChainEpoch,
	initialAmount abi.TokenAmount,
) (*types.Message, error) {

	lenAddrs := uint64(len(signers))

	if lenAddrs < threshold {
		return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
	}

	if threshold == 0 {
		threshold = lenAddrs
	}

	if m.from == address.Undef {
		return nil, xerrors.Errorf("must provide source address")
	}

	// Set up constructor parameters for multisig
	msigParams := &multisig7.ConstructorParams{
		Signers:               signers,
		NumApprovalsThreshold: threshold,
		UnlockDuration:        unlockDuration,
		StartEpoch:            unlockStart,
	}

	enc, actErr := actors.SerializeParams(msigParams)
	if actErr != nil {
		return nil, actErr
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init7.ExecParams{
		CodeCID:           builtin7.MultisigActorCodeID,
		ConstructorParams: enc,
	}

	enc, actErr = actors.SerializeParams(execParams)
	if actErr != nil {
		return nil, actErr
	}

	return &types.Message{
		To:     init_.Address,
		From:   m.from,
		Method: builtin7.MethodsInit.Exec,
		Params: enc,
		Value:  initialAmount,
	}, nil
}
@@ -13,7 +13,7 @@ import (
 	"github.com/ipfs/go-cid"
 
 	msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
-	msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig"
+	msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"
 
 	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -27,6 +27,8 @@ import (
 
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -58,6 +60,10 @@ func init() {
 	builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.MultisigActorCodeID:
 		return load6(store, act.Head)
+
+	case builtin7.MultisigActorCodeID:
+		return load7(store, act.Head)
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th
 	case actors.Version6:
 		return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+	case actors.Version7:
+		return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.MultisigActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.MultisigActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@@ -156,7 +171,7 @@ type State interface {
 
 type Transaction = msig0.Transaction
 
-var Methods = builtin6.MethodsMultisig
+var Methods = builtin7.MethodsMultisig
 
 func Message(version actors.Version, from address.Address) MessageBuilder {
 	switch version {
@@ -178,6 +193,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
 
 	case actors.Version6:
 		return message6{message0{from}}
+
+	case actors.Version7:
+		return message7{message0{from}}
 	default:
 		panic(fmt.Sprintf("unsupported actors version: %d", version))
 	}
@@ -201,13 +219,13 @@ type MessageBuilder interface {
 }
 
 // this type is the same between v0 and v2
-type ProposalHashData = msig6.ProposalHashData
-type ProposeReturn = msig6.ProposeReturn
-type ProposeParams = msig6.ProposeParams
-type ApproveReturn = msig6.ApproveReturn
+type ProposalHashData = msig7.ProposalHashData
+type ProposeReturn = msig7.ProposeReturn
+type ProposeParams = msig7.ProposeParams
+type ApproveReturn = msig7.ApproveReturn
 
 func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
-	params := msig6.TxnIDParams{ID: msig6.TxnID(id)}
+	params := msig7.TxnIDParams{ID: msig7.TxnID(id)}
 	if data != nil {
 		if data.Requester.Protocol() != address.ID {
 			return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
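Illustrative usage, not part of the diff: a minimal sketch of how the version-dispatched multisig builder above is typically driven once v7 is wired in. The variable names, amounts, and the exact ordering of the vesting parameters are assumptions inferred from the v7 Create implementation shown earlier; the usual lotus chain/actors, chain/types and go-address imports are assumed.

    // sketch: build a multisig-creation message via the v7 builder
    func buildMsigCreate(from address.Address, signers []address.Address, amt abi.TokenAmount) (*types.Message, error) {
        mb := multisig.Message(actors.Version7, from)
        // threshold 2, no vesting (start 0, duration 0) -- parameter order assumed
        return mb.Create(signers, 2, 0, 0, amt)
    }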
chain/actors/builtin/multisig/v7.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package multisig

import (
	"bytes"
	"encoding/binary"

	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"

	msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
	out := state7{store: store}
	out.State = msig7.State{}
	out.State.Signers = signers
	out.State.NumApprovalsThreshold = threshold
	out.State.StartEpoch = startEpoch
	out.State.UnlockDuration = unlockDuration
	out.State.InitialBalance = initialBalance

	em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth)
	if err != nil {
		return nil, err
	}

	out.State.PendingTxns = em

	return &out, nil
}

type state7 struct {
	msig7.State
	store adt.Store
}

func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
	return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
}

func (s *state7) StartEpoch() (abi.ChainEpoch, error) {
	return s.State.StartEpoch, nil
}

func (s *state7) UnlockDuration() (abi.ChainEpoch, error) {
	return s.State.UnlockDuration, nil
}

func (s *state7) InitialBalance() (abi.TokenAmount, error) {
	return s.State.InitialBalance, nil
}

func (s *state7) Threshold() (uint64, error) {
	return s.State.NumApprovalsThreshold, nil
}

func (s *state7) Signers() ([]address.Address, error) {
	return s.State.Signers, nil
}

func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
	arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth)
	if err != nil {
		return err
	}
	var out msig7.Transaction
	return arr.ForEach(&out, func(key string) error {
		txid, n := binary.Varint([]byte(key))
		if n <= 0 {
			return xerrors.Errorf("invalid pending transaction key: %v", key)
		}
		return cb(txid, (Transaction)(out)) //nolint:unconvert
	})
}

func (s *state7) PendingTxnChanged(other State) (bool, error) {
	other7, ok := other.(*state7)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.PendingTxns.Equals(other7.PendingTxns), nil
}

func (s *state7) transactions() (adt.Map, error) {
	return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth)
}

func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
	var tx msig7.Transaction
	if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return Transaction{}, err
	}
	return tx, nil
}

func (s *state7) GetState() interface{} {
	return &s.State
}
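For orientation, a minimal sketch (not part of the diff) of how the v7 state adapter is consumed through the package-level Load switch shown above; store and act are assumed to be an adt.Store and the multisig *types.Actor already fetched from the state tree.

    st, err := multisig.Load(store, act) // dispatches to load7 for builtin7.MultisigActorCodeID
    if err != nil {
        return err
    }
    thresh, _ := st.Threshold()
    _ = thresh
    // walk pending proposals
    err = st.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
        // txn carries To/Value/Method/Approved as defined by msig0.Transaction
        return nil
    })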
chain/actors/builtin/paych/message7.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package paych

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
	init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
	paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"

	"github.com/filecoin-project/lotus/chain/actors"
	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

type message7 struct{ from address.Address }

func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
	params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to})
	if aerr != nil {
		return nil, aerr
	}
	enc, aerr := actors.SerializeParams(&init7.ExecParams{
		CodeCID:           builtin7.PaymentChannelActorCodeID,
		ConstructorParams: params,
	})
	if aerr != nil {
		return nil, aerr
	}

	return &types.Message{
		To:     init_.Address,
		From:   m.from,
		Value:  initialAmount,
		Method: builtin7.MethodsInit.Exec,
		Params: enc,
	}, nil
}

func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
	params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{
		Sv:     *sv,
		Secret: secret,
	})
	if aerr != nil {
		return nil, aerr
	}

	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin7.MethodsPaych.UpdateChannelState,
		Params: params,
	}, nil
}

func (m message7) Settle(paych address.Address) (*types.Message, error) {
	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin7.MethodsPaych.Settle,
	}, nil
}

func (m message7) Collect(paych address.Address) (*types.Message, error) {
	return &types.Message{
		To:     paych,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin7.MethodsPaych.Collect,
	}, nil
}
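A hedged usage sketch (not part of the diff): creating and later settling a channel with the v7 builder. Pushing the messages and extracting the channel address from the init actor's exec return is elided; ch is assumed to be that returned address.

    mb := paych.Message(actors.Version7, from)
    createMsg, err := mb.Create(to, abi.NewTokenAmount(1_000_000))
    if err != nil {
        return err
    }
    _ = createMsg // push on chain; the exec return carries the channel address
    var ch address.Address // assumed: returned by the init actor
    settleMsg, err := mb.Settle(ch)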
@@ -27,6 +27,8 @@ import (
 
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -58,6 +60,10 @@ func init() {
 	builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
 // Load returns an abstract copy of payment channel state, irregardless of actor version
@@ -82,6 +88,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.PaymentChannelActorCodeID:
 		return load6(store, act.Head)
+
+	case builtin7.PaymentChannelActorCodeID:
+		return load7(store, act.Head)
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -107,6 +116,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)
+
+	case actors.Version7:
+		return make7(store)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -132,6 +144,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.PaymentChannelActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.PaymentChannelActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@@ -185,7 +200,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
 	return &sv, nil
 }
 
-var Methods = builtin6.MethodsPaych
+var Methods = builtin7.MethodsPaych
 
 func Message(version actors.Version, from address.Address) MessageBuilder {
 	switch version {
@@ -208,6 +223,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
 	case actors.Version6:
 		return message6{from}
+
+	case actors.Version7:
+		return message7{from}
 
 	default:
 		panic(fmt.Sprintf("unsupported actors version: %d", version))
 	}
chain/actors/builtin/paych/v7.go (new file, 114 lines)
@@ -0,0 +1,114 @@
package paych

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"
	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store) (State, error) {
	out := state7{store: store}
	out.State = paych7.State{}
	return &out, nil
}

type state7 struct {
	paych7.State
	store adt.Store
	lsAmt *adt7.Array
}

// Channel owner, who has funded the actor
func (s *state7) From() (address.Address, error) {
	return s.State.From, nil
}

// Recipient of payouts from channel
func (s *state7) To() (address.Address, error) {
	return s.State.To, nil
}

// Height at which the channel can be `Collected`
func (s *state7) SettlingAt() (abi.ChainEpoch, error) {
	return s.State.SettlingAt, nil
}

// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (s *state7) ToSend() (abi.TokenAmount, error) {
	return s.State.ToSend, nil
}

func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) {
	if s.lsAmt != nil {
		return s.lsAmt, nil
	}

	// Get the lane state from the chain
	lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth)
	if err != nil {
		return nil, err
	}

	s.lsAmt = lsamt
	return lsamt, nil
}

// Get total number of lanes
func (s *state7) LaneCount() (uint64, error) {
	lsamt, err := s.getOrLoadLsAmt()
	if err != nil {
		return 0, err
	}
	return lsamt.Length(), nil
}

func (s *state7) GetState() interface{} {
	return &s.State
}

// Iterate lane states
func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
	// Get the lane state from the chain
	lsamt, err := s.getOrLoadLsAmt()
	if err != nil {
		return err
	}

	// Note: we use a map instead of an array to store laneStates because the
	// client sets the lane ID (the index) and potentially they could use a
	// very large index.
	var ls paych7.LaneState
	return lsamt.ForEach(&ls, func(i int64) error {
		return cb(uint64(i), &laneState7{ls})
	})
}

type laneState7 struct {
	paych7.LaneState
}

func (ls *laneState7) Redeemed() (big.Int, error) {
	return ls.LaneState.Redeemed, nil
}

func (ls *laneState7) Nonce() (uint64, error) {
	return ls.LaneState.Nonce, nil
}
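A small sketch (not part of the diff) of iterating lane state through the abstraction above, for instance to total redeemed value; chState is assumed to be a paych.State obtained via paych.Load, and go-state-types/big is assumed imported.

    total := big.Zero()
    err := chState.ForEachLaneState(func(idx uint64, ls paych.LaneState) error {
        r, err := ls.Redeemed()
        if err != nil {
            return err
        }
        total = big.Add(total, r)
        return nil
    })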
@@ -26,6 +26,8 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )
 
 func init() {
@@ -53,11 +55,15 @@ func init() {
 	builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
 var (
-	Address = builtin6.StoragePowerActorAddr
-	Methods = builtin6.MethodsPower
+	Address = builtin7.StoragePowerActorAddr
+	Methods = builtin7.MethodsPower
 )
 
 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.StoragePowerActorCodeID:
 		return load6(store, act.Head)
+
+	case builtin7.StoragePowerActorCodeID:
+		return load7(store, act.Head)
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)
+
+	case actors.Version7:
+		return make7(store)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.StoragePowerActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.StoragePowerActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
chain/actors/builtin/power/v7.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package power

import (
	"bytes"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"

	power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power"
	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store) (State, error) {
	out := state7{store: store}

	s, err := power7.ConstructState(store)
	if err != nil {
		return nil, err
	}

	out.State = *s

	return &out, nil
}

type state7 struct {
	power7.State
	store adt.Store
}

func (s *state7) TotalLocked() (abi.TokenAmount, error) {
	return s.TotalPledgeCollateral, nil
}

func (s *state7) TotalPower() (Claim, error) {
	return Claim{
		RawBytePower:    s.TotalRawBytePower,
		QualityAdjPower: s.TotalQualityAdjPower,
	}, nil
}

// Committed power to the network. Includes miners below the minimum threshold.
func (s *state7) TotalCommitted() (Claim, error) {
	return Claim{
		RawBytePower:    s.TotalBytesCommitted,
		QualityAdjPower: s.TotalQABytesCommitted,
	}, nil
}

func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) {
	claims, err := s.claims()
	if err != nil {
		return Claim{}, false, err
	}
	var claim power7.Claim
	ok, err := claims.Get(abi.AddrKey(addr), &claim)
	if err != nil {
		return Claim{}, false, err
	}
	return Claim{
		RawBytePower:    claim.RawBytePower,
		QualityAdjPower: claim.QualityAdjPower,
	}, ok, nil
}

func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
	return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
}

func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
	return builtin.FromV7FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
}

func (s *state7) MinerCounts() (uint64, uint64, error) {
	return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
}

func (s *state7) ListAllMiners() ([]address.Address, error) {
	claims, err := s.claims()
	if err != nil {
		return nil, err
	}

	var miners []address.Address
	err = claims.ForEach(nil, func(k string) error {
		a, err := address.NewFromBytes([]byte(k))
		if err != nil {
			return err
		}
		miners = append(miners, a)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return miners, nil
}

func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
	claims, err := s.claims()
	if err != nil {
		return err
	}

	var claim power7.Claim
	return claims.ForEach(&claim, func(k string) error {
		a, err := address.NewFromBytes([]byte(k))
		if err != nil {
			return err
		}
		return cb(a, Claim{
			RawBytePower:    claim.RawBytePower,
			QualityAdjPower: claim.QualityAdjPower,
		})
	})
}

func (s *state7) ClaimsChanged(other State) (bool, error) {
	other7, ok := other.(*state7)
	if !ok {
		// treat an upgrade as a change, always
		return true, nil
	}
	return !s.State.Claims.Equals(other7.State.Claims), nil
}

func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error {
	s.State.TotalQualityAdjPower = p
	return nil
}

func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error {
	s.State.TotalRawBytePower = p
	return nil
}

func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
	s.State.ThisEpochQualityAdjPower = p
	return nil
}

func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error {
	s.State.ThisEpochRawBytePower = p
	return nil
}

func (s *state7) GetState() interface{} {
	return &s.State
}

func (s *state7) claims() (adt.Map, error) {
	return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth)
}

func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) {
	var ci power7.Claim
	if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return Claim{}, err
	}
	return fromV7Claim(ci), nil
}

func fromV7Claim(v7 power7.Claim) Claim {
	return Claim{
		RawBytePower:    v7.RawBytePower,
		QualityAdjPower: v7.QualityAdjPower,
	}
}
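Sketch (not part of the diff): querying a miner's claim through the version-agnostic interface implemented above; store, powerActor and maddr are assumed inputs already resolved by the caller.

    pst, err := power.Load(store, powerActor) // resolves to state7 for the v7 code CID
    if err != nil {
        return err
    }
    claim, found, err := pst.MinerPower(maddr)
    if err != nil {
        return err
    }
    if found {
        meets, _ := pst.MinerNominalPowerMeetsConsensusMinimum(maddr)
        _ = claim.QualityAdjPower
        _ = meets
    }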
@@ -21,6 +21,8 @@ import (
 
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -51,11 +53,15 @@ func init() {
 	builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
 var (
-	Address = builtin6.RewardActorAddr
-	Methods = builtin6.MethodsReward
+	Address = builtin7.RewardActorAddr
+	Methods = builtin7.MethodsReward
 )
 
 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -79,6 +85,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.RewardActorCodeID:
 		return load6(store, act.Head)
+
+	case builtin7.RewardActorCodeID:
+		return load7(store, act.Head)
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -104,6 +113,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage
 	case actors.Version6:
 		return make6(store, currRealizedPower)
+
+	case actors.Version7:
+		return make7(store, currRealizedPower)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -129,6 +141,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.RewardActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.RewardActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
chain/actors/builtin/reward/v7.go (new file, 98 lines)
@@ -0,0 +1,98 @@
package reward

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"

	miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
	reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward"
	smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
	out := state7{store: store}
	out.State = *reward7.ConstructState(currRealizedPower)
	return &out, nil
}

type state7 struct {
	reward7.State
	store adt.Store
}

func (s *state7) ThisEpochReward() (abi.TokenAmount, error) {
	return s.State.ThisEpochReward, nil
}

func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
	return builtin.FilterEstimate{
		PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
		VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
	}, nil
}

func (s *state7) ThisEpochBaselinePower() (abi.StoragePower, error) {
	return s.State.ThisEpochBaselinePower, nil
}

func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) {
	return s.State.TotalStoragePowerReward, nil
}

func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) {
	return s.State.EffectiveBaselinePower, nil
}

func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) {
	return s.State.EffectiveNetworkTime, nil
}

func (s *state7) CumsumBaseline() (reward7.Spacetime, error) {
	return s.State.CumsumBaseline, nil
}

func (s *state7) CumsumRealized() (reward7.Spacetime, error) {
	return s.State.CumsumRealized, nil
}

func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
	return miner7.InitialPledgeForPower(
		qaPower,
		s.State.ThisEpochBaselinePower,
		s.State.ThisEpochRewardSmoothed,
		smoothing7.FilterEstimate{
			PositionEstimate: networkQAPower.PositionEstimate,
			VelocityEstimate: networkQAPower.VelocityEstimate,
		},
		circSupply,
	), nil
}

func (s *state7) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
	return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
		smoothing7.FilterEstimate{
			PositionEstimate: networkQAPower.PositionEstimate,
			VelocityEstimate: networkQAPower.VelocityEstimate,
		},
		sectorWeight), nil
}

func (s *state7) GetState() interface{} {
	return &s.State
}
@@ -17,10 +17,12 @@ import (
 	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 )
 
 var (
-	Address = builtin6.SystemActorAddr
+	Address = builtin7.SystemActorAddr
 )
 
 func MakeState(store adt.Store, av actors.Version) (State, error) {
@@ -44,6 +46,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
 	case actors.Version6:
 		return make6(store)
+
+	case actors.Version7:
+		return make7(store)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -69,6 +74,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.SystemActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.SystemActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
chain/actors/builtin/system/v7.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package system

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store) (State, error) {
	out := state7{store: store}
	out.State = system7.State{}
	return &out, nil
}

type state7 struct {
	system7.State
	store adt.Store
}

func (s *state7) GetState() interface{} {
	return &s.State
}
chain/actors/builtin/verifreg/v7.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package verifreg

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
	verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
	adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)

var _ State = (*state7)(nil)

func load7(store adt.Store, root cid.Cid) (State, error) {
	out := state7{store: store}
	err := store.Get(store.Context(), root, &out)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

func make7(store adt.Store, rootKeyAddress address.Address) (State, error) {
	out := state7{store: store}

	s, err := verifreg7.ConstructState(store, rootKeyAddress)
	if err != nil {
		return nil, err
	}

	out.State = *s

	return &out, nil
}

type state7 struct {
	verifreg7.State
	store adt.Store
}

func (s *state7) RootKey() (address.Address, error) {
	return s.State.RootKey, nil
}

func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
	return getDataCap(s.store, actors.Version7, s.verifiedClients, addr)
}

func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
	return getDataCap(s.store, actors.Version7, s.verifiers, addr)
}

func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version7, s.verifiers, cb)
}

func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version7, s.verifiedClients, cb)
}

func (s *state7) verifiedClients() (adt.Map, error) {
	return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth)
}

func (s *state7) verifiers() (adt.Map, error) {
	return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth)
}

func (s *state7) GetState() interface{} {
	return &s.State
}
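Sketch (not part of the diff): enumerating verifiers and their remaining datacap through the interface above; store and vrActor are assumed to be the adt.Store and the verified-registry *types.Actor, and fmt is assumed imported.

    vst, err := verifreg.Load(store, vrActor)
    if err != nil {
        return err
    }
    err = vst.ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error {
        fmt.Printf("verifier %s has %s datacap\n", addr, dcap)
        return nil
    })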
@@ -21,6 +21,8 @@ import (
 
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
+
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
 
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -53,11 +55,15 @@ func init() {
 		return load6(store, root)
 	})
+
+	builtin.RegisterActorState(builtin7.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+		return load7(store, root)
+	})
 }
 
 var (
-	Address = builtin6.VerifiedRegistryActorAddr
-	Methods = builtin6.MethodsVerifiedRegistry
+	Address = builtin7.VerifiedRegistryActorAddr
+	Methods = builtin7.MethodsVerifiedRegistry
 )
 
 func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case builtin6.VerifiedRegistryActorCodeID:
 		return load6(store, act.Head)
+
+	case builtin7.VerifiedRegistryActorCodeID:
+		return load7(store, act.Head)
 	}
 	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
 }
@@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres
 	case actors.Version6:
 		return make6(store, rootKeyAddress)
+
+	case actors.Version7:
+		return make7(store, rootKeyAddress)
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
 	case actors.Version6:
 		return builtin6.VerifiedRegistryActorCodeID, nil
+
+	case actors.Version7:
+		return builtin7.VerifiedRegistryActorCodeID, nil
 	}
 
 	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@@ -40,14 +40,19 @@ import (
 	miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
 	verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg"
 
-	paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych"
+	builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
+	market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
+	miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
+	verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
+
+	paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"
 )
 
 const (
-	ChainFinality                  = miner6.ChainFinality
+	ChainFinality                  = miner7.ChainFinality
 	SealRandomnessLookback         = ChainFinality
-	PaychSettleDelay               = paych6.SettleDelay
-	MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback
+	PaychSettleDelay               = paych7.SettleDelay
+	MaxPreCommitRandomnessLookback = builtin7.EpochsInDay + SealRandomnessLookback
 )
 
 // SetSupportedProofTypes sets supported proof types, across all actor versions.
@@ -72,6 +77,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
 
 	miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+	miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
 
 	AddSupportedProofTypes(types...)
 }
@@ -119,6 +126,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
 
 		miner6.WindowPoStProofTypes[wpp] = struct{}{}
+
+		miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+		wpp, err = t.RegisteredWindowPoStProof()
+		if err != nil {
+			// Fine to panic, this is a test-only method
+			panic(err)
+		}
+
+		miner7.WindowPoStProofTypes[wpp] = struct{}{}
 	}
 }
@@ -139,11 +155,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
 
 	miner6.PreCommitChallengeDelay = delay
+
+	miner7.PreCommitChallengeDelay = delay
 }
 
 // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
 func GetPreCommitChallengeDelay() abi.ChainEpoch {
-	return miner6.PreCommitChallengeDelay
+	return miner7.PreCommitChallengeDelay
 }
 
 // SetConsensusMinerMinPower sets the minimum power of an individual miner must
@@ -173,6 +191,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
 		policy.ConsensusMinerMinPower = p
 	}
+
+	for _, policy := range builtin7.PoStProofPolicies {
+		policy.ConsensusMinerMinPower = p
+	}
 }
 
 // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
@@ -191,6 +213,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
 
 	verifreg6.MinVerifiedDealSize = size
+
+	verifreg7.MinVerifiedDealSize = size
 }
 
 func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
@@ -220,6 +244,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a
 
 		return miner6.MaxProveCommitDuration[t], nil
+
+	case actors.Version7:
+
+		return miner7.MaxProveCommitDuration[t], nil
 	default:
 		return 0, xerrors.Errorf("unsupported actors version")
 	}
@@ -255,6 +283,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) {
 		Denominator: denom,
 	}
+
+	market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{
+		Numerator:   num,
+		Denominator: denom,
+	}
 }
 
 func DealProviderCollateralBounds(
@@ -298,13 +331,18 @@ func DealProviderCollateralBounds(
 		min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
 		return min, max, nil
+
+	case actors.Version7:
+
+		min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+		return min, max, nil
 	default:
 		return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
 	}
 }
 
 func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
-	return market6.DealDurationBounds(pieceSize)
+	return market7.DealDurationBounds(pieceSize)
 }
 
 // Sets the challenge window and scales the proving period to match (such that
@@ -345,6 +383,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
 	// scale it if we're scaling the challenge period.
 	miner6.WPoStDisputeWindow = period * 30
+
+	miner7.WPoStChallengeWindow = period
+	miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines)
+
+	// by default, this is 2x finality which is 30 periods.
+	// scale it if we're scaling the challenge period.
+	miner7.WPoStDisputeWindow = period * 30
 }
 
 func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@@ -357,15 +402,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
 }
 
 func GetMaxSectorExpirationExtension() abi.ChainEpoch {
-	return miner6.MaxSectorExpirationExtension
+	return miner7.MaxSectorExpirationExtension
 }
 
 func GetMinSectorExpiration() abi.ChainEpoch {
-	return miner6.MinSectorExpiration
+	return miner7.MinSectorExpiration
 }
 
 func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
-	sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p)
+	sectorsPerPart, err := builtin7.PoStProofWindowPoStPartitionSectors(p)
 	if err != nil {
 		return 0, err
 	}
@@ -378,8 +423,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e
 
 func GetDefaultSectorSize() abi.SectorSize {
 	// supported sector sizes are the same across versions.
-	szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8))
-	for spt := range miner6.PreCommitSealProofTypesV8 {
+	szs := make([]abi.SectorSize, 0, len(miner7.PreCommitSealProofTypesV8))
+	for spt := range miner7.PreCommitSealProofTypesV8 {
 		ss, err := spt.SectorSize()
 		if err != nil {
 			panic(err)
@@ -404,7 +449,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version)
 		return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
 	}
 
-	return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime
+	return builtin7.SealProofPoliciesV11[proof].SectorMaxLifetime
 }
 
 func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
@@ -432,6 +477,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
 	case actors.Version6:
 		return miner6.AddressedSectorsMax, nil
+
+	case actors.Version7:
+		return miner7.AddressedSectorsMax, nil
 	default:
 		return 0, xerrors.Errorf("unsupported network version")
 	}
@@ -469,6 +517,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
 
 		return miner6.DeclarationsMax, nil
+
+	case actors.Version7:
+
+		return miner7.DeclarationsMax, nil
 	default:
 		return 0, xerrors.Errorf("unsupported network version")
 	}
@@ -505,6 +557,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba
 
 		return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
+
+	case actors.Version7:
+
+		return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
 	default:
 		return big.Zero(), xerrors.Errorf("unsupported network version")
 	}
@@ -541,6 +597,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
 
 		return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
+
+	case actors.Version7:
+
+		return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
 	default:
 		return big.Zero(), xerrors.Errorf("unsupported network version")
 	}
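Sketch (not part of the diff): the policy helpers dispatch on the actors version, so callers pick up v7 values simply by passing actors.Version7, or by calling the un-versioned getters, which after this change read the miner7/market7 constants. Imports of abi, actors and policy are assumed.

    d, err := policy.GetMaxProveCommitDuration(actors.Version7, abi.RegisteredSealProof_StackedDrg32GiBV1_1)
    if err != nil {
        return err
    }
    _ = d
    delay := policy.GetPreCommitChallengeDelay() // now miner7.PreCommitChallengeDelay
    _ = delay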
@@ -20,9 +20,9 @@ const ({{range .actorVersions}}
 /* inline-gen start */
 
-var LatestVersion = 6
+var LatestVersion = 7
 
-var Versions = []int{0, 2, 3, 4, 5, 6}
+var Versions = []int{0, 2, 3, 4, 5, 6, 7}
 
 const (
 	Version0 Version = 0
@@ -31,6 +31,7 @@ const (
 	Version4 Version = 4
 	Version5 Version = 5
 	Version6 Version = 6
+	Version7 Version = 7
 )
 
 /* inline-gen end */
@@ -50,6 +51,8 @@ func VersionForNetwork(version network.Version) (Version, error) {
 		return Version5, nil
 	case network.Version14:
 		return Version6, nil
+	case network.Version15:
+		return Version7, nil
 	default:
 		return -1, fmt.Errorf("unsupported network version %d", version)
 	}
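The Version7 arms added to the policy helpers above only become reachable through this mapping: once VersionForNetwork returns Version7 for network.Version15, the switch statements pick the miner7 values. A minimal sketch of that dispatch, with a hypothetical helper name (the real helpers are the ones shown in the earlier hunks, and the imports are assumed to match theirs):

// Hedged sketch, not part of the commit.
func addressedSectorsMaxAt(nwVer network.Version) (int, error) {
	v, err := actors.VersionForNetwork(nwVer) // network.Version15 maps to actors.Version7 after this change
	if err != nil {
		return 0, err
	}
	switch v {
	case actors.Version7:
		return miner7.AddressedSectorsMax, nil
	default:
		return 0, xerrors.Errorf("unsupported network version")
	}
}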
@@ -13,7 +13,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
 		return xerrors.Errorf("called with empty tsk")
 	}
 
-	ts, err := syncer.ChainStore().LoadTipSet(tsk)
+	ts, err := syncer.ChainStore().LoadTipSet(ctx, tsk)
 	if err != nil {
 		tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1)
 		if err != nil {
@@ -28,7 +28,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
 		return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err)
 	}
 
-	if err := syncer.ChainStore().SetCheckpoint(ts); err != nil {
+	if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil {
 		return xerrors.Errorf("failed to set the chain checkpoint: %w", err)
 	}
 
@@ -41,7 +41,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error {
 		return nil
 	}
 
-	if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc {
+	if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc {
 		return nil
 	}
 
@@ -50,7 +50,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error {
 		return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
 	}
 
-	if err := syncer.ChainStore().SetHead(ts); err != nil {
+	if err := syncer.ChainStore().SetHead(ctx, ts); err != nil {
 		return xerrors.Errorf("failed to set the chain head: %w", err)
 	}
 	return nil
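The same mechanical change repeats through the rest of this diff: ChainStore accessors (LoadTipSet, SetCheckpoint, SetHead, IsAncestorOf, and the message helpers further down) now take a context.Context as their first argument. A hedged caller-side sketch of what that enables, with an illustrative timeout and assumed variable names:

// Sketch only: the checkpoint path can now observe cancellation and deadlines,
// because the store calls it makes receive the caller's context.
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) // timeout value is illustrative
defer cancel()
if err := syncer.SyncCheckpoint(ctx, tsk); err != nil {
	return xerrors.Errorf("syncing checkpoint: %w", err)
}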
@@ -28,6 +28,7 @@ import (
 	exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
 	exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
 	exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported"
+	exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported"
 
 	/* inline-gen end */
 
@@ -59,6 +60,7 @@ func NewActorRegistry() *vm.ActorRegistry {
 	inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
 	inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
 	inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...)
+	inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...)
 
 	/* inline-gen end */
 
@@ -289,7 +291,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag
 	var parentEpoch abi.ChainEpoch
 	pstate := blks[0].ParentStateRoot
 	if blks[0].Height > 0 {
-		parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0])
+		parent, err := sm.ChainStore().GetBlock(ctx, blks[0].Parents[0])
 		if err != nil {
 			return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
 		}
@@ -299,7 +301,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag
 
 	r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon())
 
-	blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts)
+	blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ctx, ts)
 	if err != nil {
 		return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
 	}
@@ -90,7 +90,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 
 	h := b.Header
 
-	baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
+	baseTs, err := filec.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...))
 	if err != nil {
 		return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
 	}
@@ -102,7 +102,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 		return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
 	}
 
-	prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs)
+	prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs)
 	if err != nil {
 		return xerrors.Errorf("failed to get latest beacon entry: %w", err)
 	}
@@ -171,7 +171,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 	}
 
 	if stateroot != h.ParentStateRoot {
-		msgs, err := filec.store.MessagesForTipset(baseTs)
+		msgs, err := filec.store.MessagesForTipset(ctx, baseTs)
 		if err != nil {
 			log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
 		} else {
@@ -519,7 +519,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
 			return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
 		}
 
-		c, err := store.PutMessage(tmpbs, m)
+		c, err := store.PutMessage(ctx, tmpbs, m)
 		if err != nil {
 			return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
 		}
@@ -553,7 +553,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
 			return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
 		}
 
-		c, err := store.PutMessage(tmpbs, m)
+		c, err := store.PutMessage(ctx, tmpbs, m)
 		if err != nil {
 			return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
 		}
@@ -15,7 +15,7 @@ import (
 )
 
 func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
-	pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents)
+	pts, err := filec.sm.ChainStore().LoadTipSet(ctx, bt.Parents)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
 	}
@@ -59,14 +59,14 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
 			blsSigs = append(blsSigs, msg.Signature)
 			blsMessages = append(blsMessages, &msg.Message)
 
-			c, err := filec.sm.ChainStore().PutMessage(&msg.Message)
+			c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message)
 			if err != nil {
 				return nil, err
 			}
 
 			blsMsgCids = append(blsMsgCids, c)
-		} else {
-			c, err := filec.sm.ChainStore().PutMessage(msg)
+		} else if msg.Signature.Type == crypto.SigTypeSecp256k1 {
+			c, err := filec.sm.ChainStore().PutMessage(ctx, msg)
 			if err != nil {
 				return nil, err
 			}
@@ -74,6 +74,8 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
 			secpkMsgCids = append(secpkMsgCids, c)
 			secpkMessages = append(secpkMessages, msg)
 
+		} else {
+			return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type)
 		}
 	}
 
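The `} else {` to `} else if ...` change above tightens message partitioning in CreateBlock: BLS-signed messages have their signatures collected for aggregation while the bare message is stored, secp256k1-signed messages are stored as-is, and any other signature type now fails block creation instead of being silently treated as secp. Restated as a switch for clarity (a hedged sketch; the actual code keeps the if/else-if chain shown above):

switch msg.Signature.Type {
case crypto.SigTypeBLS:
	// signature goes into blsSigs for aggregation; &msg.Message is stored separately
case crypto.SigTypeSecp256k1:
	// the full signed message is stored
default:
	return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type)
}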
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
+	"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
 
 	"github.com/ipfs/go-cid"
 	cbor "github.com/ipfs/go-ipld-cbor"
@@ -156,6 +157,22 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
 			StopWithin:      5,
 		}},
 		Expensive: true,
+	}, {
+		Height:    build.UpgradeSnapDealsHeight,
+		Network:   network.Version15,
+		Migration: UpgradeActorsV7,
+		PreMigrations: []stmgr.PreMigration{{
+			PreMigration:    PreUpgradeActorsV7,
+			StartWithin:     120,
+			DontStartWithin: 60,
+			StopWithin:      35,
+		}, {
+			PreMigration:    PreUpgradeActorsV7,
+			StartWithin:     30,
+			DontStartWithin: 15,
+			StopWithin:      5,
+		}},
+		Expensive: true,
 	},
 	}
 
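This schedule entry wires the network.Version15 upgrade at build.UpgradeSnapDealsHeight to UpgradeActorsV7 and schedules PreUpgradeActorsV7 twice to pre-warm the migration cache. A hedged reading of the StartWithin/DontStartWithin/StopWithin fields in epoch terms (semantics assumed from stmgr.PreMigration; the arithmetic below is illustrative, not from the commit):

// H is the upgrade epoch (build.UpgradeSnapDealsHeight).
// First pass:  may start from H-120, not after H-60, and stops by H-35.
// Second pass: may start from H-30, not after H-15, and stops by H-5,
// re-running PreUpgradeActorsV7 closer to H so the cache handed to the
// expensive UpgradeActorsV7 migration at H is as fresh as possible.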
@@ -625,7 +642,7 @@ func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre
 
 // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
 func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
-	gb, err := sm.ChainStore().GetGenesis()
+	gb, err := sm.ChainStore().GetGenesis(ctx)
 	if err != nil {
 		return xerrors.Errorf("getting genesis block: %w", err)
 	}
@@ -1170,7 +1187,94 @@ func upgradeActorsV6Common(
 	// Perform the migration
 	newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
 	if err != nil {
-		return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
+		return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err)
+	}
+
+	// Persist the result.
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: types.StateTreeVersion4,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// Persist the new tree.
+
+	{
+		from := buf
+		to := buf.Read()
+
+		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
+
+func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Use all the CPUs except 3.
+	workerCount := runtime.NumCPU() - 3
+	if workerCount <= 0 {
+		workerCount = 1
+	}
+
+	config := nv15.Config{
+		MaxWorkers:        uint(workerCount),
+		JobQueueSize:      1000,
+		ResultQueueSize:   100,
+		ProgressLogPeriod: 10 * time.Second,
+	}
+
+	newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err)
+	}
+
+	return newRoot, nil
+}
+
+func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+	// Use half the CPUs for pre-migration, but leave at least 3.
+	workerCount := runtime.NumCPU()
+	if workerCount <= 4 {
+		workerCount = 1
+	} else {
+		workerCount /= 2
+	}
+
+	config := nv15.Config{MaxWorkers: uint(workerCount)}
+	_, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
+	return err
+}
+
+func upgradeActorsV7Common(
+	ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
+	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+	config nv15.Config,
+) (cid.Cid, error) {
+	buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
+	store := store.ActorStore(ctx, buf)
+
+	// Load the state root.
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion4 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 4 for actors v7 upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	// Perform the migration
+	newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err)
 	}
 
 	// Persist the result.
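For reference, the worker-count choices above are easy to misread, so here is a worked example (arithmetic only, not from the commit): on a 16-core machine UpgradeActorsV7 runs the nv15 migration with 13 workers (NumCPU - 3) while PreUpgradeActorsV7 runs with 8 (half of the CPUs); on a 4-core machine both fall back to a single worker. A hedged helper mirroring that logic:

// Illustrative only; the commit inlines this logic in the two functions above.
func migrationWorkers(numCPU int, preMigration bool) int {
	if preMigration {
		if numCPU <= 4 {
			return 1
		}
		return numCPU / 2
	}
	if w := numCPU - 3; w > 0 {
		return w
	}
	return 1
}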
@@ -87,7 +87,7 @@ func (fcs *fakeCS) ChainGetPath(ctx context.Context, from, to types.TipSetKey) (
 	}
 
 	// copied from the chainstore
-	revert, apply, err := store.ReorgOps(func(tsk types.TipSetKey) (*types.TipSet, error) {
+	revert, apply, err := store.ReorgOps(ctx, func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
 		return fcs.ChainGetTipSet(ctx, tsk)
 	}, fromTs, toTs)
 	if err != nil {
@@ -27,11 +27,11 @@ func NewMockAPI(bs blockstore.Blockstore) *MockAPI {
 }
 
 func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
-	return m.bs.Has(c)
+	return m.bs.Has(ctx, c)
 }
 
 func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
-	blk, err := m.bs.Get(c)
+	blk, err := m.bs.Get(ctx, c)
 	if err != nil {
 		return nil, xerrors.Errorf("blockstore get: %w", err)
 	}
@@ -136,7 +136,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re
 	_, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest")
 	defer span.End()
 
-	chain, err := collectChainSegment(s.cs, req)
+	chain, err := collectChainSegment(ctx, s.cs, req)
 	if err != nil {
 		log.Warn("block sync request: collectChainSegment failed: ", err)
 		return &Response{
@@ -156,13 +156,13 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re
 	}, nil
 }
 
-func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) {
+func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) {
 	var bstips []*BSTipSet
 
 	cur := req.head
 	for {
 		var bst BSTipSet
-		ts, err := cs.LoadTipSet(cur)
+		ts, err := cs.LoadTipSet(ctx, cur)
 		if err != nil {
 			return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err)
 		}
@@ -172,7 +172,7 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS
 		}
 
 		if req.options.IncludeMessages {
-			bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts)
+			bmsgs, bmincl, smsgs, smincl, err := gatherMessages(ctx, cs, ts)
 			if err != nil {
 				return nil, xerrors.Errorf("gather messages failed: %w", err)
 			}
@@ -197,14 +197,14 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS
 	}
 }
 
-func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) {
+func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) {
 	blsmsgmap := make(map[cid.Cid]uint64)
 	secpkmsgmap := make(map[cid.Cid]uint64)
 	var secpkincl, blsincl [][]uint64
 
 	var blscids, secpkcids []cid.Cid
 	for _, block := range ts.Blocks() {
-		bc, sc, err := cs.ReadMsgMetaCids(block.Messages)
+		bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages)
 		if err != nil {
 			return nil, nil, nil, nil, err
 		}
@@ -237,12 +237,12 @@ func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [
 		secpkincl = append(secpkincl, smi)
 	}
 
-	blsmsgs, err := cs.LoadMessagesFromCids(blscids)
+	blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
 
-	secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids)
+	secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
@@ -9,6 +9,8 @@ import (
 	"sync/atomic"
 	"time"
 
+	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
+
 	"github.com/filecoin-project/lotus/chain/rand"
 
 	"github.com/filecoin-project/go-state-types/network"
@@ -239,7 +241,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
 	genfb := &types.FullBlock{Header: genb.Genesis}
 	gents := store.NewFullTipSet([]*types.FullBlock{genfb})
 
-	if err := cs.SetGenesis(genb.Genesis); err != nil {
+	if err := cs.SetGenesis(context.TODO(), genb.Genesis); err != nil {
 		return nil, xerrors.Errorf("set genesis failed: %w", err)
 	}
 
@@ -469,7 +471,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
 		return nil, xerrors.Errorf("making a block for next tipset failed: %w", err)
 	}
 
-	if err := cg.cs.PersistBlockHeaders(fblk.Header); err != nil {
+	if err := cg.cs.PersistBlockHeaders(context.TODO(), fblk.Header); err != nil {
 		return nil, xerrors.Errorf("chainstore AddBlock: %w", err)
 	}
 
@@ -686,6 +688,10 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
 	panic("not supported")
 }
 
+func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
+	panic("not supported")
+}
+
 func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
 	panic("not supported")
 }
@@ -479,6 +479,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
 	verifNeeds := make(map[address.Address]abi.PaddedPieceSize)
 	var sum abi.PaddedPieceSize
 
+	csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) {
+		return big.Zero(), nil
+	}
+
 	vmopt := vm.VMOpts{
 		StateBase: stateroot,
 		Epoch:     0,
@@ -486,7 +490,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
 		Bstore:         cs.StateBlockstore(),
 		Actors:         filcns.NewActorRegistry(),
 		Syscalls:       mkFakedSigSyscalls(sys),
-		CircSupplyCalc: nil,
+		CircSupplyCalc: csc,
 		NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
 			return nv
 		},
@@ -591,7 +595,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
 	if err != nil {
 		return nil, xerrors.Errorf("serializing msgmeta failed: %w", err)
 	}
-	if err := bs.Put(mmb); err != nil {
+	if err := bs.Put(ctx, mmb); err != nil {
 		return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err)
 	}
 
@@ -621,7 +625,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
 		return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid")
 	}
 
-	if err := bs.Put(gblk); err != nil {
+	if err := bs.Put(ctx, gblk); err != nil {
 		return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err)
 	}
 
@@ -652,7 +656,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
 		return nil, xerrors.Errorf("serializing block header failed: %w", err)
 	}
 
-	if err := bs.Put(sb); err != nil {
+	if err := bs.Put(ctx, sb); err != nil {
 		return nil, xerrors.Errorf("putting header to blockstore: %w", err)
 	}
 
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"math/rand"
 
+	runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime"
+
 	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
 
 	"github.com/ipfs/go-cid"
@@ -29,7 +31,6 @@ import (
 	market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
 	power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power"
 	reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward"
-	runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
 
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -57,7 +58,7 @@ func MinerAddress(genesisIndex uint64) address.Address {
 }
 
 type fakedSigSyscalls struct {
-	runtime5.Syscalls
+	runtime7.Syscalls
 }
 
 func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error {
@@ -65,7 +66,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
 }
 
 func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
-	return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls {
+	return func(ctx context.Context, rt *vm.Runtime) runtime7.Syscalls {
 		return &fakedSigSyscalls{
 			base(ctx, rt),
 		}
@@ -509,31 +510,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
 // TODO: copied from actors test harness, deduplicate or remove from here
 type fakeRand struct{}
 
-func (fr *fakeRand) GetChainRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetChainRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
 	out := make([]byte, 32)
 	_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
 	return out, nil
 }
 
-func (fr *fakeRand) GetChainRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
-	out := make([]byte, 32)
-	_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
-	return out, nil
-}
-
-func (fr *fakeRand) GetBeaconRandomnessV3(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
-	out := make([]byte, 32)
-	_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
-	return out, nil
-}
-
-func (fr *fakeRand) GetBeaconRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
-	out := make([]byte, 32)
-	_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
-	return out, nil
-}
-
-func (fr *fakeRand) GetBeaconRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
 	out := make([]byte, 32)
 	_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
 	return out, nil
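The fakeRand rewrite above collapses the old per-version methods (GetChainRandomnessV1/V2 and GetBeaconRandomnessV1 through V3) into two methods that take the network version explicitly. A hedged sketch of the interface shape the fake now satisfies; the interface name here is an assumption, only the method signatures come from the diff:

type randomnessSource interface {
	GetChainRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error)
	GetBeaconRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error)
}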
@@ -1,6 +1,7 @@
 package slashfilter
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/filecoin-project/lotus/build"
@@ -27,7 +28,7 @@ func New(dstore ds.Batching) *SlashFilter {
 	}
 }
 
-func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error {
+func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error {
 	if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) {
 		return nil
 	}
@@ -35,7 +36,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo
 	epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
 	{
 		// double-fork mining (2 blocks at one epoch)
-		if err := checkFault(f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil {
+		if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil {
 			return err
 		}
 	}
@@ -43,7 +44,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo
 	parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes()))
 	{
 		// time-offset mining faults (2 blocks with the same parents)
-		if err := checkFault(f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil {
+		if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil {
 			return err
 		}
 	}
@@ -53,14 +54,14 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo
 
 		// First check if we have mined a block on the parent epoch
 		parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch))
-		have, err := f.byEpoch.Has(parentEpochKey)
+		have, err := f.byEpoch.Has(ctx, parentEpochKey)
 		if err != nil {
 			return err
 		}
 
 		if have {
 			// If we had, make sure it's in our parent tipset
-			cidb, err := f.byEpoch.Get(parentEpochKey)
+			cidb, err := f.byEpoch.Get(ctx, parentEpochKey)
 			if err != nil {
 				return xerrors.Errorf("getting other block cid: %w", err)
 			}
@@ -83,25 +84,25 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo
 		}
 	}
 
-	if err := f.byParents.Put(parentsKey, bh.Cid().Bytes()); err != nil {
+	if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil {
 		return xerrors.Errorf("putting byEpoch entry: %w", err)
 	}
 
-	if err := f.byEpoch.Put(epochKey, bh.Cid().Bytes()); err != nil {
+	if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil {
 		return xerrors.Errorf("putting byEpoch entry: %w", err)
 	}
 
 	return nil
 }
 
-func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error {
-	fault, err := t.Has(key)
+func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error {
+	fault, err := t.Has(ctx, key)
 	if err != nil {
 		return err
 	}
 
 	if fault {
-		cidb, err := t.Get(key)
+		cidb, err := t.Get(ctx, key)
 		if err != nil {
 			return xerrors.Errorf("getting other block cid: %w", err)
 		}
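MinedBlock and checkFault grow a context parameter because the go-datastore operations they wrap (Has, Get, Put) now take one, as the hunks above show. A hedged caller-side sketch with assumed variable names:

// Sketch only: the mining code is expected to pass its own context through
// and to refuse to publish a block when the filter reports a fault.
if err := slashFilter.MinedBlock(ctx, blk.Header, parentEpoch); err != nil {
	return xerrors.Errorf("slash filter check failed: %w", err)
}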
@@ -89,7 +89,7 @@ func (fm *FundManager) Start() error {
 	// - in State() only load addresses with in-progress messages
 	// - load the others just-in-time from getFundedAddress
 	// - delete(fm.fundedAddrs, addr) when the queue has been processed
-	return fm.str.forEach(func(state *FundedAddressState) {
+	return fm.str.forEach(fm.ctx, func(state *FundedAddressState) {
 		fa := newFundedAddress(fm, state.Addr)
 		fa.state = state
 		fm.fundedAddrs[fa.state.Addr] = fa
@@ -322,7 +322,7 @@ func (a *fundedAddress) clearWaitState() {
 // Save state to datastore
 func (a *fundedAddress) saveState() {
 	// Not much we can do if saving to the datastore fails, just log
-	err := a.str.save(a.state)
+	err := a.str.save(a.ctx, a.state)
 	if err != nil {
 		log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err)
 	}
Some files were not shown because too many files have changed in this diff.