Merge pull request #10996 from filecoin-project/sbansal/1.23.1_to_releases
merge v1.23.1 to releases
Commit: 26dbe515d7
@@ -63,7 +63,7 @@ commands:
          name: Install Rust
          command: |
            curl https://sh.rustup.rs -sSf | sh -s -- -y
-     - run: make deps lotus
+     - run: make deps
  download-params:
    steps:
      - restore_cache:
@@ -304,9 +304,7 @@ jobs:
          darwin: true
          darwin-architecture: arm64
      - run: |
-         export CPATH=$(brew --prefix)/include
-         export LIBRARY_PATH=$(brew --prefix)/lib
-         make lotus lotus-miner lotus-worker
+         export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
      - run: otool -hv lotus
      - run:
          name: check tag and version output match
@@ -812,6 +810,12 @@ workflows:
            - build
          suite: itest-mpool_push_with_uuid
          target: "./itests/mpool_push_with_uuid_test.go"
+     - test:
+         name: test-itest-msgindex
+         requires:
+           - build
+         suite: itest-msgindex
+         target: "./itests/msgindex_test.go"
      - test:
          name: test-itest-multisig
          requires:
@@ -878,6 +882,12 @@ workflows:
            - build
          suite: itest-sdr_upgrade
          target: "./itests/sdr_upgrade_test.go"
+     - test:
+         name: test-itest-sealing_resources
+         requires:
+           - build
+         suite: itest-sealing_resources
+         target: "./itests/sealing_resources_test.go"
      - test:
          name: test-itest-sector_finalize_early
          requires:
@@ -1063,6 +1073,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -1072,6 +1083,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -1081,6 +1093,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -1093,7 +1106,7 @@ workflows:
          filters:
            branches:
              ignore:
-               - /.*/
+               - /^.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -1108,6 +1121,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
      - build-docker:
          name: "Docker push (lotus-all-in-one / stable / mainnet)"
          image: lotus-all-in-one
@@ -63,7 +63,7 @@ commands:
          name: Install Rust
          command: |
            curl https://sh.rustup.rs -sSf | sh -s -- -y
-     - run: make deps lotus
+     - run: make deps
  download-params:
    steps:
      - restore_cache:
@@ -304,9 +304,7 @@ jobs:
          darwin: true
          darwin-architecture: arm64
      - run: |
-         export CPATH=$(brew --prefix)/include
-         export LIBRARY_PATH=$(brew --prefix)/lib
-         make lotus lotus-miner lotus-worker
+         export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
      - run: otool -hv lotus
      - run:
          name: check tag and version output match
@@ -583,6 +581,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -592,6 +591,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -601,6 +601,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -613,7 +614,7 @@ workflows:
          filters:
            branches:
              ignore:
-               - /.*/
+               - /^.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
@@ -628,6 +629,7 @@ workflows:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
+               - /^ci\/.*$/
  [[- range .Networks]]
      - build-docker:
          name: "Docker push (lotus-all-in-one / stable / [[.]])"
CHANGELOG.md (168 lines changed)
@@ -1,5 +1,169 @@
# Lotus changelog

# v1.23.1 / 2023-06-20

This is an optional feature release of Lotus. It includes numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.

**☢️ Upgrade Warnings ☢️**

If you are upgrading to this release from Lotus v1.22.1, please make sure to read the upgrade warnings in the [v1.23.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.23.0) first.

- *Storage providers:* The lotus-miner legacy markets subsystem has been disabled by default in this feature release and will be removed in the near future. Users are advised to migrate to [Boost](https://boost.filecoin.io) or other SP market systems.

## Highlights

**🛣 Execution Lanes 🛣**

This feature release introduces VM Execution Lanes! Execution lanes efficiently divide the workload between system processes (chainsync) and RPC requests. This way, syncing the chain is not at the mercy of responding to users' requests, and RPC provider nodes should have fewer problems catching up.

To take advantage of VM Execution Lanes, you need to set two environment variables, illustrated conceptually in the sketch after this list:
- `LOTUS_FVM_CONCURRENCY` - read more about how this value should be set [here](https://lotus.filecoin.io/lotus/configure/ethereum-rpc/#environment-variables)
- `LOTUS_FVM_CONCURRENCY_RESERVED = 4`
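With both variables set, the slots above the reserved count are shared, while the reserved slots stay free for chainsync. Below is a rough conceptual sketch only — hypothetical names, not lotus's actual scheduler:

```go
package main

import "fmt"

// laneScheduler sketches the execution-lane idea: out of `total`
// concurrent FVM slots, `reserved` are held back for priority
// (chainsync) work so RPC traffic can never starve syncing.
type laneScheduler struct {
	defaultLane  chan struct{} // slots RPC requests compete for
	priorityLane chan struct{} // slots only priority work may use
}

func newLaneScheduler(total, reserved int) *laneScheduler {
	return &laneScheduler{
		defaultLane:  make(chan struct{}, total-reserved),
		priorityLane: make(chan struct{}, reserved),
	}
}

// acquire blocks until a slot is free and returns its release func.
// Priority callers may also take a reserved slot, so they progress
// even when every default slot is busy serving RPC.
func (s *laneScheduler) acquire(priority bool) (release func()) {
	if priority {
		select {
		case s.priorityLane <- struct{}{}:
			return func() { <-s.priorityLane }
		case s.defaultLane <- struct{}{}:
			return func() { <-s.defaultLane }
		}
	}
	s.defaultLane <- struct{}{}
	return func() { <-s.defaultLane }
}

func main() {
	// e.g. LOTUS_FVM_CONCURRENCY=24, LOTUS_FVM_CONCURRENCY_RESERVED=4
	s := newLaneScheduler(24, 4)
	release := s.acquire(true) // a chainsync execution
	defer release()
	fmt.Println("priority execution holds a lane")
}
```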
**🧱 Aggregation / Batching fixes 🔨**

Numerous aggregation and batching fixes have been included in this feature release. Large `ProveCommitAggregate` and `PreCommitBatch` messages that exceed the block limit will now automatically be split into smaller messages when sent to the chain.

Additionally, we have added a new feature that staggers the number of ProveCommit messages sent simultaneously to the chain if a storage provider has been aggregating many sectors in a ProveCommitAggregate message, but at the time of publishing the BaseFee is below the aggregation threshold. This stagger feature prevents issues where some of the ProveCommit messages fail with a SysErrOutOfGas error. You can tweak how many messages will be staggered per epoch by changing `MaxSectorProveCommitsSubmittedPerEpoch` in the [sealing section of the config.toml file](https://lotus.filecoin.io/storage-providers/advanced-configurations/sealing/#sealing-section), as sketched below.
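A hedged sketch of the staggering idea — chunking a large sector set so that at most `MaxSectorProveCommitsSubmittedPerEpoch` (default 20) ProveCommits land per epoch. The helper below is illustrative, not the lotus implementation:

```go
package main

import "fmt"

// staggerBatches splits sector numbers into per-epoch chunks of at most
// `max` entries, mirroring MaxSectorProveCommitsSubmittedPerEpoch.
func staggerBatches(sectors []uint64, max int) [][]uint64 {
	var batches [][]uint64
	for len(sectors) > 0 {
		n := max
		if len(sectors) < n {
			n = len(sectors)
		}
		batches = append(batches, sectors[:n])
		sectors = sectors[n:]
	}
	return batches
}

func main() {
	sectors := make([]uint64, 45)
	for i := range sectors {
		sectors[i] = uint64(i)
	}
	// With the default limit of 20, 45 sectors spread over 3 epochs.
	for epoch, b := range staggerBatches(sectors, 20) {
		fmt.Printf("epoch offset %d: submit %d ProveCommits\n", epoch, len(b))
	}
}
```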
*NB:* While these fixes are great for the reliability of aggregation and batching on the Lotus side, it has been uncovered that aggregating ProveCommit messages for sectors containing verified deals is currently more expensive than sending single messages, due to an issue on the actors side. We therefore do not recommend aggregating ProveCommit messages when doing verified deals until that issue has been resolved. You can follow the discussion on resolving the issue on the [actors side here](https://github.com/filecoin-project/FIPs/discussions/689).

**Unsealing CLI/API**

This feature release adds a dedicated `lotus-miner sectors unseal` command and API, allowing you to easily unseal specific sealed sectors.
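The API counterpart, `SectorUnseal`, appears on the StorageMiner interface in the API diffs further down. A minimal sketch of calling it — `minerAPI` is assumed to be an already-connected `api.StorageMiner` client with admin permissions, and the sector number is made up:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// unsealSector asks the miner to unseal a specific sealed sector via
// the new SectorUnseal API. minerAPI is assumed to be an already-
// connected api.StorageMiner client; sector 42 is hypothetical.
func unsealSector(ctx context.Context, minerAPI api.StorageMiner) error {
	var num abi.SectorNumber = 42
	if err := minerAPI.SectorUnseal(ctx, num); err != nil {
		return fmt.Errorf("unsealing sector %d: %w", num, err)
	}
	return nil
}
```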
## New features
- feat: VM Execution Lanes ([filecoin-project/lotus#10551](https://github.com/filecoin-project/lotus/pull/10551))
  - Adds VM execution lanes, efficiently dividing the workload between system processes and RPC requests.
- Add API and CLI to unseal sector (#10626) ([filecoin-project/lotus#10626](https://github.com/filecoin-project/lotus/pull/10626))
  - Adds a `lotus-miner sectors unseal` command and an API method to unseal a sector.
- feat: sealing: Split PCA/PCB batches if gas used exceeds block limit ([filecoin-project/lotus#10647](https://github.com/filecoin-project/lotus/pull/10647))
  - Splits ProveCommitAggregate and PreCommitBatch messages into multiple messages if a message exceeds the block limit.
- Add feature to stagger sector prove commit submission (#10543) ([filecoin-project/lotus#10543](https://github.com/filecoin-project/lotus/pull/10543))
  - Staggers the number of ProveCommit messages sent simultaneously if a storage provider has been aggregating many messages but the BaseFee is below the aggregation threshold at the moment of publishing, preventing unwanted SysErrOutOfGas issues.
- Set default for MaxSectorProveCommitsSubmittedPerEpoch ([filecoin-project/lotus#10728](https://github.com/filecoin-project/lotus/pull/10728))
  - Sets the default number of ProveCommits submitted per epoch to 20.
- feat: worker: Ensure tempdir exists (#10433) ([filecoin-project/lotus#10433](https://github.com/filecoin-project/lotus/pull/10433))
  - Ensures that the temporary directory exists on start of a lotus-worker with a custom TMPDIR set.
- feat: sync: harden chain sync (#10756) ([filecoin-project/lotus#10756](https://github.com/filecoin-project/lotus/pull/10756))
- feat: populate the index on snapshot import ([filecoin-project/lotus#10556](https://github.com/filecoin-project/lotus/pull/10556))
- feat: chain: Message Index (**HIGHLY EXPERIMENTAL**) ([filecoin-project/lotus#10452](https://github.com/filecoin-project/lotus/pull/10452))
  - MVP of a message index that allows us to accelerate StateSearchMessage and related functionality, and eventually accelerate critical chain calls (follow-up).
- feat: Add small cache to execution traces ([filecoin-project/lotus#10517](https://github.com/filecoin-project/lotus/pull/10517))
- feat: shed: incoming block-sub chainwatch tool ([filecoin-project/lotus#10513](https://github.com/filecoin-project/lotus/pull/10513))
## Improvements
- feat: daemon: Auto-resume interrupted snapshot imports ([filecoin-project/lotus#10636](https://github.com/filecoin-project/lotus/pull/10636))
  - Auto-resumes interrupted snapshot imports when using a URL.
- fix: storage: Remove temp fetching files after failed fetch ([filecoin-project/lotus#10661](https://github.com/filecoin-project/lotus/pull/10661))
  - Cleans up partially fetched files after a failed fetch on a lotus-worker.
- feat: chainstore: batch writes of tipsets ([filecoin-project/lotus#10800](https://github.com/filecoin-project/lotus/pull/10800))
  - Reduces the time to persist all headers from 4-5 minutes to under 15 seconds.
- Check if epoch is negative in GetTipsetByHeight
- fix: sched: Address GET_32G_MAX_CONCURRENT regression
- fix: cli: Hide legacy markets cmds
  - Hides the legacy markets commands from the lotus-miner CLI.
- fix: ci: Debugging m1 build
- Disable lotus markets by default (#10809) ([filecoin-project/lotus#10809](https://github.com/filecoin-project/lotus/pull/10809))
  - Disables lotus-miner legacy markets [EOL] by default.
- perf: mempool: lower priority optimizations (#10693) ([filecoin-project/lotus#10693](https://github.com/filecoin-project/lotus/pull/10693))
- perf: message pool: change locks to RWMutexes for performance ([filecoin-project/lotus#10561](https://github.com/filecoin-project/lotus/pull/10561))
- perf: eth: gas estimate set applyTsMessages false (#10546) ([filecoin-project/lotus#10546](https://github.com/filecoin-project/lotus/pull/10546))
- Change args check ([filecoin-project/lotus#10812](https://github.com/filecoin-project/lotus/pull/10812))
- fix: sealing: Make lotus-worker report GPU usage to miner during ReplicaUpdate task (#10806) ([filecoin-project/lotus#10806](https://github.com/filecoin-project/lotus/pull/10806))
- fix: splitstore: Don't block when potentially holding txnLk as writer ([filecoin-project/lotus#10811](https://github.com/filecoin-project/lotus/pull/10811))
- fix: prover: Propagate skipped sectors in local PoSt
- fix: unseal: check if sealed/update sector exists ([filecoin-project/lotus#10639](https://github.com/filecoin-project/lotus/pull/10639))
- fix: sealing pipeline: Allow nil message in TerminateWait ([filecoin-project/lotus#10696](https://github.com/filecoin-project/lotus/pull/10696))
- fix: cli: Check if the sectorID exists before removing ([filecoin-project/lotus#10611](https://github.com/filecoin-project/lotus/pull/10611))
- feat: splitstore: limit moving gc threads ([filecoin-project/lotus#10621](https://github.com/filecoin-project/lotus/pull/10621))
- fix: cli: Make `net connect` to miner address work ([filecoin-project/lotus#10599](https://github.com/filecoin-project/lotus/pull/10599))
- fix: log: Stop logging `file does not exists` ([filecoin-project/lotus#10588](https://github.com/filecoin-project/lotus/pull/10588))
- Update config default value (#10605) ([filecoin-project/lotus#10605](https://github.com/filecoin-project/lotus/pull/10605))
- fix: cap the message gas limit at the block gas limit (#10637) ([filecoin-project/lotus#10637](https://github.com/filecoin-project/lotus/pull/10637))
- fix: miner: correctly count sector extensions ([filecoin-project/lotus#10544](https://github.com/filecoin-project/lotus/pull/10544))
- fix: mpool: prune excess messages before selection ([filecoin-project/lotus#10648](https://github.com/filecoin-project/lotus/pull/10648))
- fix: proving: Initialize slice with same length as partition ([filecoin-project/lotus#10569](https://github.com/filecoin-project/lotus/pull/10569))
- perf: Address performance of EthGetTransactionCount ([filecoin-project/lotus#10700](https://github.com/filecoin-project/lotus/pull/10700))
- fix: sync: reduce log from error to info ([filecoin-project/lotus#10759](https://github.com/filecoin-project/lotus/pull/10759))
- fix: state: lotus-miner info should show deals info without admin permission ([filecoin-project/lotus#10323](https://github.com/filecoin-project/lotus/pull/10323))
- fix: tvx: make extract-multiple support the FVM ([filecoin-project/lotus#10714](https://github.com/filecoin-project/lotus/pull/10714))
- feat: badger: add a has check before writing to reduce duplicates ([filecoin-project/lotus#10680](https://github.com/filecoin-project/lotus/pull/10680))
- fix: chain: record heaviest tipset before notifying (#10694) ([filecoin-project/lotus#10694](https://github.com/filecoin-project/lotus/pull/10694))
- fix: Eth JSON-RPC api: handle messages with gasFeeCap less than baseFee (#10614) ([filecoin-project/lotus#10614](https://github.com/filecoin-project/lotus/pull/10614))
- feat: chainstore: optimize BlockMsgsForTipset ([filecoin-project/lotus#10552](https://github.com/filecoin-project/lotus/pull/10552))
- refactor: stop using deprecated io/ioutil ([filecoin-project/lotus#10596](https://github.com/filecoin-project/lotus/pull/10596))
- feat: shed: refactor market cron-state command ([filecoin-project/lotus#10746](https://github.com/filecoin-project/lotus/pull/10746))
- fix: events: don't set GC confidence to 1 ([filecoin-project/lotus#10713](https://github.com/filecoin-project/lotus/pull/10713))
- feat: sync: validate (early) that blocks fall within range (#10691) ([filecoin-project/lotus#10691](https://github.com/filecoin-project/lotus/pull/10691))
- chainstore: Fix raw blocks getting scanned for links during snapshots (#10684) ([filecoin-project/lotus#10684](https://github.com/filecoin-project/lotus/pull/10684))
- fix: remove pointless panic ([filecoin-project/lotus#10690](https://github.com/filecoin-project/lotus/pull/10690))
- fix: check for nil bcastDict (#10646) ([filecoin-project/lotus#10646](https://github.com/filecoin-project/lotus/pull/10646))
- fix: make state compute --html work with unknown methods ([filecoin-project/lotus#10619](https://github.com/filecoin-project/lotus/pull/10619))
- shed: get balances of evm accounts ([filecoin-project/lotus#10489](https://github.com/filecoin-project/lotus/pull/10489))
- feat: Use MessageIndex in WaitForMessage ([filecoin-project/lotus#10587](https://github.com/filecoin-project/lotus/pull/10587))
- fix: searchForIndexedMsg always returns an error ([filecoin-project/lotus#10586](https://github.com/filecoin-project/lotus/pull/10586))
- fix: export-range: Ignore ipld Blocks not found in Receipts ([filecoin-project/lotus#10535](https://github.com/filecoin-project/lotus/pull/10535))
- feat: stmgr: speed up calculation of genesis circ supply ([filecoin-project/lotus#10553](https://github.com/filecoin-project/lotus/pull/10553))
- fix: gas estimation: don't special case paych collects ([filecoin-project/lotus#10549](https://github.com/filecoin-project/lotus/pull/10549))
- fix: tracer: emit raw peer ids for compatibility with libp2p tracer ([filecoin-project/lotus#10271](https://github.com/filecoin-project/lotus/pull/10271))
- Merge branch 'feat/new-gw-methods'
## Dependencies
- chore: deps: update to go-libp2p 0.27.5
- deps: update libp2p (#10937)
- chore: deps: update to FVM 3.3.1 ([filecoin-project/lotus#10786](https://github.com/filecoin-project/lotus/pull/10786))
- chore: boxo: migrate from go-libipfs to boxo ([filecoin-project/lotus#10562](https://github.com/filecoin-project/lotus/pull/10562))
- chore: deps: update to go-state-types v0.11.0-alpha-3 ([filecoin-project/lotus#10606](https://github.com/filecoin-project/lotus/pull/10606))
- chore: bump go-libipfs ([filecoin-project/lotus#10531](https://github.com/filecoin-project/lotus/pull/10531))
## Others
- feat: networking: (Synchronous) Consistent Broadcast for Filecoin EC ([filecoin-project/lotus#9858](https://github.com/filecoin-project/lotus/pull/9858))
- Revert #9858 (consistent broadcast changes) ([filecoin-project/lotus#10777](https://github.com/filecoin-project/lotus/pull/10777))
- Update build version for release/v1.23.1
- chore: drop flaky TestBatchDealInput subcase ([filecoin-project/lotus#10810](https://github.com/filecoin-project/lotus/pull/10810))
- chore: changelog clean up ([filecoin-project/lotus#10744](https://github.com/filecoin-project/lotus/pull/10744))
- chore: refactor: drop unused IsTicketWinner (#10801) ([filecoin-project/lotus#10801](https://github.com/filecoin-project/lotus/pull/10801))
- chore: build: bump master version to v1.23.1-dev ([filecoin-project/lotus#10709](https://github.com/filecoin-project/lotus/pull/10709))
- fix: deflake: use 2 miners for flaky tests ([filecoin-project/lotus#10764](https://github.com/filecoin-project/lotus/pull/10764))
- test: eth: deflake multiblock lookup test (#10769) ([filecoin-project/lotus#10769](https://github.com/filecoin-project/lotus/pull/10769))
- shed: migrations: add reminder about continuity testing tool ([filecoin-project/lotus#10762](https://github.com/filecoin-project/lotus/pull/10762))
- chore: merge releases into master ([filecoin-project/lotus#10742](https://github.com/filecoin-project/lotus/pull/10742))
- test: events: fix race when recording tipsets (#10665) ([filecoin-project/lotus#10665](https://github.com/filecoin-project/lotus/pull/10665))
- fix: build: add CBDeliveryDelay to testground ([filecoin-project/lotus#10613](https://github.com/filecoin-project/lotus/pull/10613))
- fix: build: Fixed incorrect words that could not be compiled ([filecoin-project/lotus#10610](https://github.com/filecoin-project/lotus/pull/10610))
- build: docker: Update Go version ([filecoin-project/lotus#10581](https://github.com/filecoin-project/lotus/pull/10581))
- fix: itests: Don't call t.Error in MineBlocks goroutine ([filecoin-project/lotus#10572](https://github.com/filecoin-project/lotus/pull/10572))
- docs: api: clarify MpoolClear params ([filecoin-project/lotus#10550](https://github.com/filecoin-project/lotus/pull/10550))
## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| vyzo | 70 | +1990/-429 | 135 |
| Alfonso de la Rocha | 25 | +814/-299 | 56 |
| Steven Allen | 14 | +125/-539 | 28 |
| Shrenuj Bansal | 13 | +482/-138 | 52 |
| Aayush | 17 | +317/-301 | 90 |
| Łukasz Magiera | 13 | +564/-26 | 16 |
| Jennifer Wang | 7 | +401/-140 | 10 |
| Fridrik Asmundsson | 14 | +315/-84 | 20 |
| Jorropo | 2 | +139/-137 | 74 |
| Mikers | 6 | +114/-43 | 14 |
| Hector Sanjuan | 5 | +92/-44 | 5 |
| Ales Dumikau | 1 | +117/-0 | 10 |
| Mike Seiler | 4 | +51/-51 | 6 |
| zenground0 | 6 | +33/-25 | 8 |
| Phi | 8 | +32/-10 | 10 |
| Aayush Rajasekaran | 1 | +1/-32 | 2 |
| Ian Davis | 2 | +7/-10 | 3 |
| Marcel Telka | 1 | +5/-7 | 1 |
| ychiao | 1 | +8/-3 | 2 |
| jennijuju | 1 | +4/-4 | 8 |
| adlrocha | 2 | +2/-2 | 2 |
| Jiaying Wang | 1 | +0/-4 | 1 |
| ZenGround0 | 1 | +2/-1 | 2 |
| Zeng Li | 1 | +1/-1 | 1 |
# v1.23.0 / 2023-04-21

This is the stable feature release for the upcoming MANDATORY network upgrade at `2023-04-27T13:00:00Z`, epoch `2809800`. This feature release delivers the nv19 Lightning and nv20 Thunder network upgrades for mainnet, and includes numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.

@@ -606,10 +770,6 @@ verifiedregistry bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a
### Dependencies
github.com/filecoin-project/go-state-types (v0.11.0-rc1 -> v0.11.1):

-<<<<<<< HEAD
-=======
-
->>>>>>> releases

# v1.20.4 / 2023-03-17

This is a patch release intended to alleviate performance issues reported by some users since the nv18 upgrade.
@@ -7,8 +7,8 @@ import (
	"time"

	"github.com/google/uuid"
+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/filecoin-project/go-address"
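This import swap — from `go-libipfs/blocks` back to `go-block-format` — recurs across most files in this diff; call sites are untouched because the package exposes the same `blocks.Block` surface. A quick sanity check of the restored import:

```go
package main

import (
	"fmt"

	blocks "github.com/ipfs/go-block-format"
)

func main() {
	// NewBlock hashes the payload with the default multihash, so the
	// resulting block's CID commits to exactly these bytes.
	b := blocks.NewBlock([]byte("hello filecoin"))
	fmt.Println(b.Cid(), len(b.RawData()))
}
```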
@@ -297,8 +297,10 @@ type FullNode interface {
	MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
	MpoolSub(context.Context) (<-chan MpoolUpdate, error)           //perm:read

-	// MpoolClear clears pending messages from the mpool
-	MpoolClear(context.Context, bool) error //perm:write
+	// MpoolClear clears pending messages from the mpool.
+	// If clearLocal is true, ALL messages will be cleared.
+	// If clearLocal is false, local messages will be protected, all others will be cleared.
+	MpoolClear(ctx context.Context, clearLocal bool) error //perm:write

	// MpoolGetConfig returns (a copy of) the current mpool config
	MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
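A short usage sketch of the clarified semantics — `fullAPI` is assumed to be an already-connected `api.FullNode` client:

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// clearRemotePending clears pending mpool messages but protects local
// ones. fullAPI is assumed to be an already-connected api.FullNode.
func clearRemotePending(ctx context.Context, fullAPI api.FullNode) error {
	// clearLocal=false: local messages are protected, all others cleared.
	return fullAPI.MpoolClear(ctx, false)
}
```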
@@ -3,8 +3,8 @@ package api
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-jsonrpc"
@@ -33,6 +33,9 @@ import (
// * Generate openrpc blobs

type Gateway interface {
+	StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
+	GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
+	StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
	ChainHasObj(context.Context, cid.Cid) (bool, error)
	ChainPutObj(context.Context, blocks.Block) error
	ChainHead(ctx context.Context) (*types.TipSet, error)
@@ -73,5 +73,5 @@ type CommonNet interface {

type NatInfo struct {
	Reachability network.Reachability
-	PublicAddr   string
+	PublicAddrs  []string
}
@@ -129,6 +129,8 @@ type StorageMiner interface {
	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
+	// SectorUnseal unseals the provided sector
+	SectorUnseal(ctx context.Context, number abi.SectorNumber) error //perm:admin

	// SectorNumAssignerMeta returns sector number assigner metadata - reserved/allocated
	SectorNumAssignerMeta(ctx context.Context) (NumAssignerMeta, error) //perm:read
@@ -14,9 +14,9 @@ import (
	"unicode"

	"github.com/google/uuid"
+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-graphsync"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	textselector "github.com/ipld/go-ipld-selector-text-lite"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/metrics"
@@ -12,8 +12,8 @@ import (
	gomock "github.com/golang/mock/gomock"
	uuid "github.com/google/uuid"
+	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	metrics "github.com/libp2p/go-libp2p/core/metrics"
	network0 "github.com/libp2p/go-libp2p/core/network"
	peer "github.com/libp2p/go-libp2p/core/peer"
@@ -8,8 +8,8 @@ import (
	"time"

	"github.com/google/uuid"
+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
@@ -726,6 +726,8 @@ type GatewayMethods struct {
	EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``

+	GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
+
	GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``

	MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
@@ -768,12 +770,16 @@ type GatewayMethods struct {
	StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``

+	StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) ``
+
	StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``

	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``

	StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) ``

+	StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) ``
+
	StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``

	StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@@ -1079,6 +1085,8 @@ type StorageMinerMethods struct {
	SectorTerminatePending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`

+	SectorUnseal func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
	SectorsList func(p0 context.Context) ([]abi.SectorNumber, error) `perm:"read"`

	SectorsListInStates func(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) `perm:"read"`
@@ -4614,6 +4622,17 @@ func (s *GatewayStub) EthUnsubscribe(p0 context.Context, p1 ethtypes.EthSubscrip
	return false, ErrNotSupported
}

+func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+	if s.Internal.GasEstimateGasPremium == nil {
+		return *new(types.BigInt), ErrNotSupported
+	}
+	return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+	return *new(types.BigInt), ErrNotSupported
+}
+
func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
	if s.Internal.GasEstimateMessageGas == nil {
		return nil, ErrNotSupported
@@ -4845,6 +4864,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
	return nil, ErrNotSupported
}

+func (s *GatewayStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
+	if s.Internal.StateMinerSectorCount == nil {
+		return *new(MinerSectors), ErrNotSupported
+	}
+	return s.Internal.StateMinerSectorCount(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
+	return *new(MinerSectors), ErrNotSupported
+}
+
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
	if s.Internal.StateNetworkName == nil {
		return *new(dtypes.NetworkName), ErrNotSupported
@@ -4878,6 +4908,17 @@ func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2
	return nil, ErrNotSupported
}

+func (s *GatewayStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
+	if s.Internal.StateReplay == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateReplay(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
+	return nil, ErrNotSupported
+}
+
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
	if s.Internal.StateSearchMsg == nil {
		return nil, ErrNotSupported
@@ -6385,6 +6426,17 @@ func (s *StorageMinerStub) SectorTerminatePending(p0 context.Context) ([]abi.Sec
	return *new([]abi.SectorID), ErrNotSupported
}

+func (s *StorageMinerStruct) SectorUnseal(p0 context.Context, p1 abi.SectorNumber) error {
+	if s.Internal.SectorUnseal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SectorUnseal(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorUnseal(p0 context.Context, p1 abi.SectorNumber) error {
+	return ErrNotSupported
+}
+
func (s *StorageMinerStruct) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) {
	if s.Internal.SectorsList == nil {
		return *new([]abi.SectorNumber), ErrNotSupported
@@ -3,8 +3,8 @@ package v0api
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	textselector "github.com/ipld/go-ipld-selector-text-lite"
	"github.com/libp2p/go-libp2p/core/peer"
@@ -3,8 +3,8 @@ package v0api
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
@@ -35,6 +35,9 @@ import (
// * Generate openrpc blobs

type Gateway interface {
+	StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
+	GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
+	StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
	ChainHasObj(context.Context, cid.Cid) (bool, error)
	ChainPutObj(context.Context, blocks.Block) error
	ChainHead(ctx context.Context) (*types.TipSet, error)
@@ -5,8 +5,8 @@ package v0api
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-libp2p/core/peer"
	"golang.org/x/xerrors"
@@ -449,6 +449,8 @@ type GatewayMethods struct {
	ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``

+	GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
+
	GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``

	MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
@@ -487,10 +489,14 @@ type GatewayMethods struct {
	StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``

+	StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) ``
+
	StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``

	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``

+	StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) ``
+
	StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``

	StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@@ -2674,6 +2680,17 @@ func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, erro
	return *new([]byte), ErrNotSupported
}

+func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+	if s.Internal.GasEstimateGasPremium == nil {
+		return *new(types.BigInt), ErrNotSupported
+	}
+	return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+	return *new(types.BigInt), ErrNotSupported
+}
+
func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
	if s.Internal.GasEstimateMessageGas == nil {
		return nil, ErrNotSupported
@@ -2883,6 +2900,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
	return nil, ErrNotSupported
}

+func (s *GatewayStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
+	if s.Internal.StateMinerSectorCount == nil {
+		return *new(api.MinerSectors), ErrNotSupported
+	}
+	return s.Internal.StateMinerSectorCount(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
+	return *new(api.MinerSectors), ErrNotSupported
+}
+
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
	if s.Internal.StateNetworkName == nil {
		return *new(dtypes.NetworkName), ErrNotSupported
@@ -2905,6 +2933,17 @@ func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey
	return *new(abinetwork.Version), ErrNotSupported
}

+func (s *GatewayStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
+	if s.Internal.StateReplay == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateReplay(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
+	return nil, ErrNotSupported
+}
+
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
	if s.Internal.StateSearchMsg == nil {
		return nil, ErrNotSupported
@@ -11,8 +11,8 @@ import (
	gomock "github.com/golang/mock/gomock"
	uuid "github.com/google/uuid"
+	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	metrics "github.com/libp2p/go-libp2p/core/metrics"
	network0 "github.com/libp2p/go-libp2p/core/network"
	peer "github.com/libp2p/go-libp2p/core/peer"
@@ -3,8 +3,8 @@ package blockstore
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"
)
@@ -5,9 +5,9 @@ import (
	"sync"
	"time"

+	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	block "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"
)
@@ -13,9 +13,9 @@ import (
	"github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
	"github.com/dgraph-io/badger/v2/pb"
+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	logger "github.com/ipfs/go-log/v2"
	pool "github.com/libp2p/go-buffer-pool"
	"github.com/multiformats/go-base32"
@@ -746,6 +746,20 @@ func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error {
	}

	put := func(db *badger.DB) error {
+		// Check if we have it before writing it.
+		switch err := db.View(func(txn *badger.Txn) error {
+			_, err := txn.Get(k)
+			return err
+		}); err {
+		case badger.ErrKeyNotFound:
+		case nil:
+			// Already exists, skip the put.
+			return nil
+		default:
+			return err
+		}
+
+		// Then write it.
		err := db.Update(func(txn *badger.Txn) error {
			return txn.Set(k, block.RawData())
		})
@@ -801,12 +815,33 @@ func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
		keys = append(keys, k)
	}

+	err := b.db.View(func(txn *badger.Txn) error {
+		for i, k := range keys {
+			switch _, err := txn.Get(k); err {
+			case badger.ErrKeyNotFound:
+			case nil:
+				keys[i] = nil
+			default:
+				// Something is actually wrong
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
	put := func(db *badger.DB) error {
		batch := db.NewWriteBatch()
		defer batch.Cancel()

		for i, block := range blocks {
			k := keys[i]
+			if k == nil {
+				// skipped because we already have it.
+				continue
+			}
			if err := batch.Set(k, block.RawData()); err != nil {
				return err
			}
@@ -10,8 +10,8 @@ import (
	"strings"
	"testing"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
@@ -9,10 +9,10 @@ import (
	"strings"
	"testing"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/blockstore"
@@ -4,9 +4,9 @@ import (
	"context"
	"os"

+	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	block "github.com/ipfs/go-libipfs/blocks"
)

// buflog is a logger for the buffered blockstore. It is subscoped from the
@@ -4,8 +4,8 @@ import (
	"context"
	"io"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
)

var _ Blockstore = (*discardstore)(nil)
@@ -5,9 +5,9 @@ import (
	"sync"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"
)
@@ -4,8 +4,8 @@ import (
	"context"
	"io"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	mh "github.com/multiformats/go-multihash"
	"golang.org/x/xerrors"
)
@@ -5,9 +5,9 @@ import (
	"context"
	"io"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	httpapi "github.com/ipfs/go-ipfs-http-client"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	iface "github.com/ipfs/interface-go-ipfs-core"
	"github.com/ipfs/interface-go-ipfs-core/options"
	"github.com/ipfs/interface-go-ipfs-core/path"
@@ -3,9 +3,9 @@ package blockstore
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
)

// NewMemory returns a temporary memory-backed blockstore.
@@ -4,8 +4,8 @@ import (
	"context"
	"testing"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	mh "github.com/multiformats/go-multihash"
	"github.com/stretchr/testify/require"
)
@@ -8,9 +8,9 @@ import (
	"sync"
	"sync/atomic"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-msgio"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"
@@ -5,9 +5,9 @@ import (
	"context"
	"encoding/binary"

+	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	block "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-msgio"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"
@@ -6,8 +6,8 @@ import (
	"io"
	"testing"

+	block "github.com/ipfs/go-block-format"
	ipld "github.com/ipfs/go-ipld-format"
-	block "github.com/ipfs/go-libipfs/blocks"
	"github.com/libp2p/go-msgio"
	"github.com/stretchr/testify/require"
)
@@ -12,8 +12,8 @@ import (
	"sync"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"go.uber.org/multierr"
	"golang.org/x/xerrors"
)
@@ -8,10 +8,10 @@ import (
	"sync/atomic"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	dstore "github.com/ipfs/go-datastore"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	logging "github.com/ipfs/go-log/v2"
	"go.opencensus.io/stats"
	"go.uber.org/multierr"
@@ -10,9 +10,9 @@ import (
	"sync/atomic"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	cbg "github.com/whyrusleeping/cbor-gen"
	"go.opencensus.io/stats"
	"golang.org/x/sync/errgroup"
@@ -455,7 +455,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
// transactionally protect a reference by walking the object and marking.
// concurrent markings are short circuited by checking the markset.
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) {
-	if err := s.checkYield(); err != nil {
+	if err := s.checkClosing(); err != nil {
		return 0, err
	}
@@ -4,9 +4,9 @@ import (
	"context"
	"errors"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"

	bstore "github.com/filecoin-project/lotus/blockstore"
)
@@ -5,8 +5,8 @@ import (
	"runtime"
	"sync/atomic"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"
)
@@ -11,11 +11,11 @@ import (
	"testing"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	logging "github.com/ipfs/go-log/v2"
	mh "github.com/multiformats/go-multihash"
@@ -5,9 +5,9 @@ import (
	"sync/atomic"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
@@ -4,8 +4,8 @@ import (
	"context"
	"sync"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
)

// NewMemorySync returns a thread-safe in-memory blockstore.
@@ -6,9 +6,9 @@ import (
	"sync"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/raulk/clock"
	"go.uber.org/multierr"
)
@@ -6,8 +6,8 @@ import (
	"testing"
	"time"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/raulk/clock"
	"github.com/stretchr/testify/require"
)
@@ -3,9 +3,9 @@ package blockstore
import (
	"context"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
-	blocks "github.com/ipfs/go-libipfs/blocks"
)

type unionBlockstore []Blockstore
@@ -5,7 +5,7 @@ import (
	"context"
	"testing"

-	blocks "github.com/ipfs/go-libipfs/blocks"
+	blocks "github.com/ipfs/go-block-format"
	"github.com/stretchr/testify/require"
)
Binary files not shown (4).
@@ -9,6 +9,7 @@ package build

import (
	"math/big"
+	"time"

	"github.com/ipfs/go-cid"

@@ -137,3 +138,7 @@ const BootstrapPeerThreshold = 1
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926
+
+// Reducing the delivery delay for equivocation of
+// consistent broadcast to just half a second.
+var CBDeliveryDelay = 500 * time.Millisecond
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
-const BuildVersion = "1.23.0"
+const BuildVersion = "1.23.1"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -110,6 +110,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
		TipSetGetter:  stmgr.TipSetGetterForTipset(sm.ChainStore(), ts),
		Tracing:       vmTracing,
		ReturnEvents:  sm.ChainStore().IsStoringEvents(),
+		ExecutionLane: vm.ExecutionLanePriority,
	}

	return sm.VMConstructor()(ctx, vmopt)
@@ -382,13 +382,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
	return nil
}

-func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
+func (filec *FilecoinEC) IsEpochInConsensusRange(epoch abi.ChainEpoch) bool {
	if filec.genesis == nil {
		return true
	}

+	// Don't try to sync anything before finality. Don't propagate such blocks either.
+	//
+	// We use _our_ current head, not the expected head, because the network's head can lag on
+	// catch-up (after a network outage).
+	if epoch < filec.store.GetHeaviestTipSet().Height()-build.Finality {
+		return false
+	}
+
	now := uint64(build.Clock.Now().Unix())
-	return epoch > (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
+	return epoch <= (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}

func (filec *FilecoinEC) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
@@ -32,9 +32,10 @@ type Consensus interface {
	// the block (signature verifications, VRF checks, message validity, etc.)
	ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)

-	// IsEpochBeyondCurrMax is used to configure the fork rules for longest-chain
-	// consensus protocols.
-	IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
+	// IsEpochInConsensusRange returns true if the epoch is "in range" for consensus. That is:
+	// - It's not before finality.
+	// - It's not too far in the future.
+	IsEpochInConsensusRange(epoch abi.ChainEpoch) bool

	// CreateBlock implements all the logic required to propose and assemble a new Filecoin block.
	//
@@ -65,23 +66,24 @@ func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pub

	stats.Record(ctx, metrics.BlockReceived.M(1))

	recordFailureFlagPeer := func(what string) {
		// bv.Validate will flag the peer in that case
		panic(what)
	}

	blk, what, err := decodeAndCheckBlock(msg)
	if err != nil {
		log.Error("got invalid block over pubsub: ", err)
		recordFailureFlagPeer(what)
		return pubsub.ValidationReject, what
	}

+	if !cns.IsEpochInConsensusRange(blk.Header.Height) {
+		// We ignore these blocks instead of rejecting to avoid breaking the network if
+		// we're recovering from an outage (e.g., where nobody agrees on where "head" is
+		// currently).
+		log.Warnf("received block outside of consensus range (%d)", blk.Header.Height)
+		return pubsub.ValidationIgnore, "invalid_block_height"
+	}
+
	// validate the block meta: the Message CID in the header must match the included messages
	err = validateMsgMeta(ctx, blk)
	if err != nil {
		log.Warnf("error validating message metadata: %s", err)
		recordFailureFlagPeer("invalid_block_meta")
		return pubsub.ValidationReject, "invalid_block_meta"
	}

@@ -91,7 +93,6 @@ func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pub
		log.Warn("ignoring block msg: ", err)
		return pubsub.ValidationIgnore, reject
	}
	recordFailureFlagPeer(reject)
	return pubsub.ValidationReject, reject
}
@@ -47,7 +47,7 @@ type Events struct {
	*hcEvents
}

-func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
+func newEventsWithGCConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
	cache := newCache(api, gcConfidence)

	ob := newObserver(cache, gcConfidence)
@@ -63,5 +63,5 @@ func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi

func NewEvents(ctx context.Context, api EventAPI) (*Events, error) {
	gcConfidence := 2 * build.ForkLengthThreshold
-	return NewEventsWithConfidence(ctx, api, gcConfidence)
+	return newEventsWithGCConfidence(ctx, api, gcConfidence)
}
@@ -174,13 +174,16 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
		},
	})

	require.NoError(t, err)

+	fcs.mu.Lock()
+	defer fcs.mu.Unlock()
+
	if fcs.tipsets == nil {
		fcs.tipsets = map[types.TipSetKey]*types.TipSet{}
	}
	fcs.tipsets[ts.Key()] = ts

	require.NoError(t, err)

	return ts
}
@@ -4,8 +4,8 @@ import (
	"context"
	"sync"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
@@ -34,6 +34,7 @@ import (
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
+	"github.com/filecoin-project/lotus/chain/index"
	"github.com/filecoin-project/lotus/chain/rand"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
@@ -256,7 +257,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
	//return nil, xerrors.Errorf("creating drand beacon: %w", err)
	//}

-	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
+	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex)
	if err != nil {
		return nil, xerrors.Errorf("initing stmgr: %w", err)
	}
@@ -3,8 +3,8 @@ package genesis
import (
	"encoding/hex"

+	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
-	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/multiformats/go-multihash"
)
chain/index/interface.go (new file, 45 lines)
@ -0,0 +1,45 @@
package index

import (
    "context"
    "errors"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/go-state-types/abi"
)

var ErrNotFound = errors.New("message not found")
var ErrClosed = errors.New("index closed")

// MsgInfo is the Message metadata the index tracks.
type MsgInfo struct {
    // the message this record refers to
    Message cid.Cid
    // the tipset where this message was included
    TipSet cid.Cid
    // the epoch where this message was included
    Epoch abi.ChainEpoch
}

// MsgIndex is the interface to the message index
type MsgIndex interface {
    // GetMsgInfo retrieves the message metadata through the index.
    // The lookup is done using the onchain message Cid; that is the signed message Cid
    // for SECP messages and unsigned message Cid for BLS messages.
    GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error)
    // Close closes the index
    Close() error
}

type dummyMsgIndex struct{}

func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
    return MsgInfo{}, ErrNotFound
}

func (dummyMsgIndex) Close() error {
    return nil
}

var DummyMsgIndex MsgIndex = dummyMsgIndex{}
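A minimal sketch of how a caller might consume this interface, assuming an already-constructed MsgIndex (see msgindex.go below) and an onchain message CID; the lookupMessage helper is hypothetical and not part of this changeset:

// Sketch: resolving a message's inclusion tipset and epoch via MsgIndex.
// Assumes a constructed index and a hypothetical onchain message CID.
func lookupMessage(ctx context.Context, idx MsgIndex, mcid cid.Cid) {
    minfo, err := idx.GetMsgInfo(ctx, mcid)
    switch {
    case err == nil:
        log.Infof("message %s included in tipset %s at epoch %d", minfo.Message, minfo.TipSet, minfo.Epoch)
    case errors.Is(err, ErrNotFound):
        // the index may legitimately have incomplete data; fall back to a chain walk
    case errors.Is(err, ErrClosed):
        // the index has been shut down; treat it as unavailable
    default:
        log.Warnf("message index lookup failed: %s", err)
    }
}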
553
chain/index/msgindex.go
Normal file
@ -0,0 +1,553 @@
package index

import (
    "context"
    "database/sql"
    "errors"
    "io/fs"
    "os"
    "path"
    "sync"
    "time"

    "github.com/ipfs/go-cid"
    logging "github.com/ipfs/go-log/v2"
    _ "github.com/mattn/go-sqlite3"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
)

var log = logging.Logger("msgindex")

var dbName = "msgindex.db"
var dbDefs = []string{
    `CREATE TABLE IF NOT EXISTS messages (
        cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE,
        tipset_cid VARCHAR(80) NOT NULL,
        epoch INTEGER NOT NULL
    )`,
    `CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)
    `,
    `CREATE TABLE IF NOT EXISTS _meta (
        version UINT64 NOT NULL UNIQUE
    )`,
    `INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
}
var dbPragmas = []string{}

const (
    // prepared stmts
    dbqGetMessageInfo       = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?"
    dbqInsertMessage        = "INSERT INTO messages VALUES (?, ?, ?)"
    dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?"
    // reconciliation
    dbqCountMessages         = "SELECT COUNT(*) FROM messages"
    dbqMinEpoch              = "SELECT MIN(epoch) FROM messages"
    dbqCountTipsetMessages   = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?"
    dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?"
)

// coalescer configuration (TODO: use observer instead)
// these are exposed to make tests snappy
var (
    CoalesceMinDelay      = time.Second
    CoalesceMaxDelay      = 15 * time.Second
    CoalesceMergeInterval = time.Second
)

// chain store interface; we could use store.ChainStore directly,
// but this simplifies unit testing.
type ChainStore interface {
    SubscribeHeadChanges(f store.ReorgNotifee)
    MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error)
    GetHeaviestTipSet() *types.TipSet
    GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
}

var _ ChainStore = (*store.ChainStore)(nil)

type msgIndex struct {
    cs ChainStore

    db               *sql.DB
    selectMsgStmt    *sql.Stmt
    insertMsgStmt    *sql.Stmt
    deleteTipSetStmt *sql.Stmt

    sema chan struct{}
    mx   sync.Mutex
    pend []headChange

    cancel  func()
    workers sync.WaitGroup
    closeLk sync.RWMutex
    closed  bool
}

var _ MsgIndex = (*msgIndex)(nil)

type headChange struct {
    rev []*types.TipSet
    app []*types.TipSet
}

func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex, error) {
    var (
        dbPath string
        exists bool
        err    error
    )

    err = os.MkdirAll(basePath, 0755)
    if err != nil {
        return nil, xerrors.Errorf("error creating msgindex base directory: %w", err)
    }

    dbPath = path.Join(basePath, dbName)
    _, err = os.Stat(dbPath)
    switch {
    case err == nil:
        exists = true

    case errors.Is(err, fs.ErrNotExist):

    case err != nil:
        return nil, xerrors.Errorf("error stating msgindex database: %w", err)
    }

    db, err := sql.Open("sqlite3", dbPath)
    if err != nil {
        // TODO [nice to have]: automatically delete corrupt databases
        // but for now we can just error and let the operator delete.
        return nil, xerrors.Errorf("error opening msgindex database: %w", err)
    }

    if err := prepareDB(db); err != nil {
        return nil, xerrors.Errorf("error creating msgindex database: %w", err)
    }

    // TODO we may consider populating the index when first creating the db
    if exists {
        if err := reconcileIndex(db, cs); err != nil {
            return nil, xerrors.Errorf("error reconciling msgindex database: %w", err)
        }
    }

    ctx, cancel := context.WithCancel(lctx)

    msgIndex := &msgIndex{
        db:     db,
        cs:     cs,
        sema:   make(chan struct{}, 1),
        cancel: cancel,
    }

    err = msgIndex.prepareStatements()
    if err != nil {
        if err := db.Close(); err != nil {
            log.Errorf("error closing msgindex database: %s", err)
        }

        return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err)
    }

    rnf := store.WrapHeadChangeCoalescer(
        msgIndex.onHeadChange,
        CoalesceMinDelay,
        CoalesceMaxDelay,
        CoalesceMergeInterval,
    )
    cs.SubscribeHeadChanges(rnf)

    msgIndex.workers.Add(1)
    go msgIndex.background(ctx)

    return msgIndex, nil
}

func PopulateAfterSnapshot(lctx context.Context, basePath string, cs ChainStore) error {
    err := os.MkdirAll(basePath, 0755)
    if err != nil {
        return xerrors.Errorf("error creating msgindex base directory: %w", err)
    }

    dbPath := path.Join(basePath, dbName)

    // if a database already exists, we try to delete it and create a new one
    if _, err := os.Stat(dbPath); err == nil {
        if err = os.Remove(dbPath); err != nil {
            return xerrors.Errorf("msgindex already exists at %s and can't be deleted", dbPath)
        }
    }

    db, err := sql.Open("sqlite3", dbPath)
    if err != nil {
        return xerrors.Errorf("error opening msgindex database: %w", err)
    }
    defer func() {
        if err := db.Close(); err != nil {
            log.Errorf("error closing msgindex database: %s", err)
        }
    }()

    if err := prepareDB(db); err != nil {
        return xerrors.Errorf("error creating msgindex database: %w", err)
    }

    tx, err := db.Begin()
    if err != nil {
        return xerrors.Errorf("error when starting transaction: %w", err)
    }

    rollback := func() {
        if err := tx.Rollback(); err != nil {
            log.Errorf("error in rollback: %s", err)
        }
    }

    insertStmt, err := tx.Prepare(dbqInsertMessage)
    if err != nil {
        rollback()
        return xerrors.Errorf("error preparing insertStmt: %w", err)
    }

    curTs := cs.GetHeaviestTipSet()
    startHeight := curTs.Height()
    for curTs != nil {
        tscid, err := curTs.Key().Cid()
        if err != nil {
            rollback()
            return xerrors.Errorf("error computing tipset cid: %w", err)
        }

        tskey := tscid.String()
        epoch := int64(curTs.Height())

        msgs, err := cs.MessagesForTipset(lctx, curTs)
        if err != nil {
            log.Infof("stopping import after %d tipsets", startHeight-curTs.Height())
            break
        }

        for _, msg := range msgs {
            key := msg.Cid().String()
            if _, err := insertStmt.Exec(key, tskey, epoch); err != nil {
                rollback()
                return xerrors.Errorf("error inserting message: %w", err)
            }
        }

        curTs, err = cs.GetTipSetFromKey(lctx, curTs.Parents())
        if err != nil {
            rollback()
            return xerrors.Errorf("error walking chain: %w", err)
        }
    }

    err = tx.Commit()
    if err != nil {
        return xerrors.Errorf("error committing transaction: %w", err)
    }

    return nil
}

// init utilities
func prepareDB(db *sql.DB) error {
    for _, stmt := range dbDefs {
        if _, err := db.Exec(stmt); err != nil {
            return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
        }
    }

    for _, stmt := range dbPragmas {
        if _, err := db.Exec(stmt); err != nil {
            return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
        }
    }

    return nil
}

func reconcileIndex(db *sql.DB, cs ChainStore) error {
    // Invariant: after reconciliation, every tipset in the index is in the current chain; ie either
    //  the chain head or reachable by walking the chain.
    // Algorithm:
    //  1. Count messages in index; if none, trivially reconciled.
    //     TODO we may consider populating the index in that case
    //  2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk
    //  3. Walk from current tipset until we find a tipset in the index.
    //  4. Delete (revert!) all tipsets above the found tipset.
    //  5. If the walk ends in the boundary epoch, then delete everything.
    //

    row := db.QueryRow(dbqCountMessages)

    var result int64
    if err := row.Scan(&result); err != nil {
        return xerrors.Errorf("error counting messages: %w", err)
    }

    if result == 0 {
        return nil
    }

    row = db.QueryRow(dbqMinEpoch)
    if err := row.Scan(&result); err != nil {
        return xerrors.Errorf("error finding boundary epoch: %w", err)
    }

    boundaryEpoch := abi.ChainEpoch(result)

    countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages)
    if err != nil {
        return xerrors.Errorf("error preparing statement: %w", err)
    }

    curTs := cs.GetHeaviestTipSet()
    for curTs != nil && curTs.Height() >= boundaryEpoch {
        tsCid, err := curTs.Key().Cid()
        if err != nil {
            return xerrors.Errorf("error computing tipset cid: %w", err)
        }

        key := tsCid.String()
        row = countMsgsStmt.QueryRow(key)
        if err := row.Scan(&result); err != nil {
            return xerrors.Errorf("error counting messages: %w", err)
        }

        if result > 0 {
            // found it!
            boundaryEpoch = curTs.Height() + 1
            break
        }

        // walk up
        parents := curTs.Parents()
        curTs, err = cs.GetTipSetFromKey(context.TODO(), parents)
        if err != nil {
            return xerrors.Errorf("error walking chain: %w", err)
        }
    }

    // delete everything above the minEpoch
    if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil {
        return xerrors.Errorf("error deleting stale reorged out message: %w", err)
    }

    return nil
}

func (x *msgIndex) prepareStatements() error {
    stmt, err := x.db.Prepare(dbqGetMessageInfo)
    if err != nil {
        return xerrors.Errorf("prepare selectMsgStmt: %w", err)
    }
    x.selectMsgStmt = stmt

    stmt, err = x.db.Prepare(dbqInsertMessage)
    if err != nil {
        return xerrors.Errorf("prepare insertMsgStmt: %w", err)
    }
    x.insertMsgStmt = stmt

    stmt, err = x.db.Prepare(dbqDeleteTipsetMessages)
    if err != nil {
        return xerrors.Errorf("prepare deleteTipSetStmt: %w", err)
    }
    x.deleteTipSetStmt = stmt

    return nil
}

// head change notifee
func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error {
    x.closeLk.RLock()
    defer x.closeLk.RUnlock()

    if x.closed {
        return nil
    }

    // do it in the background to avoid blocking head change processing
    x.mx.Lock()
    x.pend = append(x.pend, headChange{rev: rev, app: app})
    pendLen := len(x.pend)
    x.mx.Unlock()

    // complain loudly if this is building backlog
    if pendLen > 10 {
        log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen)
    }

    select {
    case x.sema <- struct{}{}:
    default:
    }

    return nil
}

func (x *msgIndex) background(ctx context.Context) {
    defer x.workers.Done()

    for {
        select {
        case <-x.sema:
            err := x.processHeadChanges(ctx)
            if err != nil {
                // we can't rely on an inconsistent index, so shut it down.
                log.Errorf("error processing head change notifications: %s; shutting down message index", err)
                if err2 := x.Close(); err2 != nil {
                    log.Errorf("error shutting down index: %s", err2)
                }
            }

        case <-ctx.Done():
            return
        }
    }
}

func (x *msgIndex) processHeadChanges(ctx context.Context) error {
    x.mx.Lock()
    pend := x.pend
    x.pend = nil
    x.mx.Unlock()

    tx, err := x.db.Begin()
    if err != nil {
        return xerrors.Errorf("error creating transaction: %w", err)
    }

    for _, hc := range pend {
        for _, ts := range hc.rev {
            if err := x.doRevert(ctx, tx, ts); err != nil {
                if err2 := tx.Rollback(); err2 != nil {
                    log.Errorf("error rolling back transaction: %s", err2)
                }
                return xerrors.Errorf("error reverting %s: %w", ts, err)
            }
        }

        for _, ts := range hc.app {
            if err := x.doApply(ctx, tx, ts); err != nil {
                if err2 := tx.Rollback(); err2 != nil {
                    log.Errorf("error rolling back transaction: %s", err2)
                }
                return xerrors.Errorf("error applying %s: %w", ts, err)
            }
        }
    }

    return tx.Commit()
}

func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
    tskey, err := ts.Key().Cid()
    if err != nil {
        return xerrors.Errorf("error computing tipset cid: %w", err)
    }

    key := tskey.String()
    _, err = tx.Stmt(x.deleteTipSetStmt).Exec(key)
    return err
}

func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
    tscid, err := ts.Key().Cid()
    if err != nil {
        return xerrors.Errorf("error computing tipset cid: %w", err)
    }

    tskey := tscid.String()
    epoch := int64(ts.Height())

    msgs, err := x.cs.MessagesForTipset(ctx, ts)
    if err != nil {
        return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err)
    }

    insertStmt := tx.Stmt(x.insertMsgStmt)
    for _, msg := range msgs {
        key := msg.Cid().String()
        if _, err := insertStmt.Exec(key, tskey, epoch); err != nil {
            return xerrors.Errorf("error inserting message: %w", err)
        }
    }

    return nil
}

// interface
func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
    x.closeLk.RLock()
    defer x.closeLk.RUnlock()

    if x.closed {
        return MsgInfo{}, ErrClosed
    }

    var (
        tipset string
        epoch  int64
    )

    key := m.String()
    row := x.selectMsgStmt.QueryRow(key)
    err := row.Scan(&tipset, &epoch)
    switch {
    case err == sql.ErrNoRows:
        return MsgInfo{}, ErrNotFound

    case err != nil:
        return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err)
    }

    tipsetCid, err := cid.Decode(tipset)
    if err != nil {
        return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err)
    }

    return MsgInfo{
        Message: m,
        TipSet:  tipsetCid,
        Epoch:   abi.ChainEpoch(epoch),
    }, nil
}

func (x *msgIndex) Close() error {
    x.closeLk.Lock()
    defer x.closeLk.Unlock()

    if x.closed {
        return nil
    }

    x.closed = true

    x.cancel()
    x.workers.Wait()

    return x.db.Close()
}

// informal apis for itests; not exposed in the main interface
func (x *msgIndex) CountMessages() (int64, error) {
    x.closeLk.RLock()
    defer x.closeLk.RUnlock()

    if x.closed {
        return 0, ErrClosed
    }

    var result int64
    row := x.db.QueryRow(dbqCountMessages)
    err := row.Scan(&result)
    return result, err
}
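A sketch of the intended lifecycle, under the assumption that the caller holds a *store.ChainStore and a base directory for the database; the exampleLifecycle helper is hypothetical and simply strings together the exported entry points above:

// Sketch: typical lifecycle against a *store.ChainStore; basePath is a
// hypothetical repo subdirectory, and error handling is abbreviated.
func exampleLifecycle(ctx context.Context, cs *store.ChainStore, basePath string) error {
    // After importing a snapshot, rebuild the index by walking back from the
    // current head; any pre-existing msgindex.db is deleted first.
    if err := PopulateAfterSnapshot(ctx, basePath, cs); err != nil {
        return err
    }

    // Open the index. Because the database now exists, NewMsgIndex runs
    // reconcileIndex to drop tipsets that are no longer on the canonical
    // chain, then subscribes to coalesced head changes to stay in sync.
    idx, err := NewMsgIndex(ctx, basePath, cs)
    if err != nil {
        return err
    }

    // ... serve GetMsgInfo queries ...

    // Close cancels the background worker and closes the database.
    return idx.Close()
}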
298
chain/index/msgindex_test.go
Normal file
@ -0,0 +1,298 @@
package index

import (
    "context"
    "errors"
    "math/rand"
    "os"
    "testing"
    "time"

    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/go-address"

    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/mock"
)

func TestBasicMsgIndex(t *testing.T) {
    // the most basic of tests:
    // 1. Create an index with mock chain store
    // 2. Advance the chain for a few tipsets
    // 3. Verify that the index contains all messages with the correct tipset/epoch
    cs := newMockChainStore()
    cs.genesis()

    tmp := t.TempDir()
    t.Cleanup(func() { _ = os.RemoveAll(tmp) })

    msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
    require.NoError(t, err)

    defer msgIndex.Close() //nolint

    for i := 0; i < 10; i++ {
        t.Logf("advance to epoch %d", i+1)
        err := cs.advance()
        require.NoError(t, err)
        // wait for the coalescer to notify
        time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
    }

    t.Log("verifying index")
    verifyIndex(t, cs, msgIndex)
}

func TestReorgMsgIndex(t *testing.T) {
    // slightly more nuanced test that includes reorgs
    // 1. Create an index with mock chain store
    // 2. Advance/Reorg the chain for a few tipsets
    // 3. Verify that the index contains all messages with the correct tipset/epoch
    cs := newMockChainStore()
    cs.genesis()

    tmp := t.TempDir()
    t.Cleanup(func() { _ = os.RemoveAll(tmp) })

    msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
    require.NoError(t, err)

    defer msgIndex.Close() //nolint

    for i := 0; i < 10; i++ {
        t.Logf("advance to epoch %d", i+1)
        err := cs.advance()
        require.NoError(t, err)
        // wait for the coalescer to notify
        time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
    }

    // a simple reorg
    t.Log("doing reorg")
    reorgme := cs.curTs
    reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
    require.NoError(t, err)
    cs.setHead(reorgmeParent)
    reorgmeChild := cs.makeBlk()
    err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
    require.NoError(t, err)
    time.Sleep(CoalesceMinDelay + 10*time.Millisecond)

    t.Log("verifying index")
    verifyIndex(t, cs, msgIndex)

    t.Log("verifying that reorged messages are not present")
    verifyMissing(t, cs, msgIndex, reorgme)
}

func TestReconcileMsgIndex(t *testing.T) {
    // test that exercises the reconciliation code paths
    // 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex.
    // 2. Close it
    // 3. Reorg the mock chain store
    // 4. Reopen the index to trigger reconciliation
    // 5. Ensure that only the stable messages remain.
    cs := newMockChainStore()
    cs.genesis()

    tmp := t.TempDir()
    t.Cleanup(func() { _ = os.RemoveAll(tmp) })

    msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
    require.NoError(t, err)

    for i := 0; i < 10; i++ {
        t.Logf("advance to epoch %d", i+1)
        err := cs.advance()
        require.NoError(t, err)
        // wait for the coalescer to notify
        time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
    }

    // Close it and reorg
    err = msgIndex.Close()
    require.NoError(t, err)
    cs.notify = nil

    // a simple reorg
    t.Log("doing reorg")
    reorgme := cs.curTs
    reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
    require.NoError(t, err)
    cs.setHead(reorgmeParent)
    reorgmeChild := cs.makeBlk()
    err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
    require.NoError(t, err)

    // reopen to reconcile
    msgIndex, err = NewMsgIndex(context.Background(), tmp, cs)
    require.NoError(t, err)

    defer msgIndex.Close() //nolint

    t.Log("verifying index")
    // need to step one up because the last tipset is not known by the index
    cs.setHead(reorgmeParent)
    verifyIndex(t, cs, msgIndex)

    t.Log("verifying that reorged and unknown messages are not present")
    verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild)
}

func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) {
    for ts := cs.curTs; ts.Height() > 0; {
        t.Logf("verify at height %d", ts.Height())
        blks := ts.Blocks()
        if len(blks) == 0 {
            break
        }

        tsCid, err := ts.Key().Cid()
        require.NoError(t, err)

        msgs, err := cs.MessagesForTipset(context.Background(), ts)
        require.NoError(t, err)
        for _, m := range msgs {
            minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
            require.NoError(t, err)
            require.Equal(t, tsCid, minfo.TipSet)
            require.Equal(t, ts.Height(), minfo.Epoch)
        }

        parents := ts.Parents()
        ts, err = cs.GetTipSetFromKey(context.Background(), parents)
        require.NoError(t, err)
    }
}

func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) {
    for _, ts := range missing {
        msgs, err := cs.MessagesForTipset(context.Background(), ts)
        require.NoError(t, err)
        for _, m := range msgs {
            _, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
            require.Equal(t, ErrNotFound, err)
        }
    }
}

type mockChainStore struct {
    notify store.ReorgNotifee

    curTs   *types.TipSet
    tipsets map[types.TipSetKey]*types.TipSet
    msgs    map[types.TipSetKey][]types.ChainMsg

    nonce uint64
}

var _ ChainStore = (*mockChainStore)(nil)

var systemAddr address.Address
var rng *rand.Rand

func init() {
    systemAddr, _ = address.NewIDAddress(0)
    rng = rand.New(rand.NewSource(314159))

    // adjust those to make tests snappy
    CoalesceMinDelay = 100 * time.Millisecond
    CoalesceMaxDelay = time.Second
    CoalesceMergeInterval = 100 * time.Millisecond
}

func newMockChainStore() *mockChainStore {
    return &mockChainStore{
        tipsets: make(map[types.TipSetKey]*types.TipSet),
        msgs:    make(map[types.TipSetKey][]types.ChainMsg),
    }
}

func (cs *mockChainStore) genesis() {
    genBlock := mock.MkBlock(nil, 0, 0)
    genTs := mock.TipSet(genBlock)
    cs.msgs[genTs.Key()] = nil
    cs.setHead(genTs)
}

func (cs *mockChainStore) setHead(ts *types.TipSet) {
    cs.curTs = ts
    cs.tipsets[ts.Key()] = ts
}

func (cs *mockChainStore) advance() error {
    ts := cs.makeBlk()
    return cs.reorg(nil, []*types.TipSet{ts})
}

func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error {
    for _, ts := range rev {
        parents := ts.Parents()
        cs.curTs = cs.tipsets[parents]
    }

    for _, ts := range app {
        cs.tipsets[ts.Key()] = ts
        cs.curTs = ts
    }

    if cs.notify != nil {
        return cs.notify(rev, app)
    }

    return nil
}

func (cs *mockChainStore) makeBlk() *types.TipSet {
    height := cs.curTs.Height() + 1

    blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height))
    blk.Messages = cs.makeGarbageCid()

    ts := mock.TipSet(blk)
    msg1 := cs.makeMsg()
    msg2 := cs.makeMsg()
    cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2}

    return ts
}

func (cs *mockChainStore) makeMsg() *types.Message {
    nonce := cs.nonce
    cs.nonce++
    return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce}
}

func (cs *mockChainStore) makeGarbageCid() cid.Cid {
    garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))})
    return garbage.Cid()
}

func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) {
    cs.notify = f
}

func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) {
    msgs, ok := cs.msgs[ts.Key()]
    if !ok {
        return nil, errors.New("unknown tipset")
    }

    return msgs, nil
}

func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet {
    return cs.curTs
}

func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
    ts, ok := cs.tipsets[tsk]
    if !ok {
        return nil, errors.New("unknown tipset")
    }
    return ts, nil
}
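The sleeps of CoalesceMinDelay plus a small margin in the tests above exist because the index never observes raw head changes: NewMsgIndex wraps its notifee in store.WrapHeadChangeCoalescer, so a change only reaches onHeadChange after the coalescing delay. A sketch of that wiring, mirroring NewMsgIndex (the subscribeCoalesced helper is illustrative):

// Sketch: how the index's notifee is driven (mirrors NewMsgIndex above).
// With the test overrides in init(), a head change reaches onHeadChange only
// after the coalescing delay, which is why the tests sleep slightly longer
// than CoalesceMinDelay before verifying.
func subscribeCoalesced(cs ChainStore, x *msgIndex) {
    rnf := store.WrapHeadChangeCoalescer(
        x.onHeadChange,
        CoalesceMinDelay,
        CoalesceMaxDelay,
        CoalesceMergeInterval,
    )
    cs.SubscribeHeadChanges(rnf)
}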
@ -32,14 +32,19 @@ func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessageP

// CheckPendingMessages performs a set of logical checks for all messages pending from a given actor
func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
    var msgs []*types.Message
    mp.lk.Lock()
    mset, ok := mp.pending[from]
    mp.lk.RLock()
    mset, ok, err := mp.getPendingMset(ctx, from)
    if err != nil {
        log.Warnf("errored while getting pending mset: %w", err)
        return nil, err
    }
    if ok {
        msgs = make([]*types.Message, 0, len(mset.msgs))
        for _, sm := range mset.msgs {
            msgs = append(msgs, &sm.Message)
        }
    }
    mp.lk.Unlock()
    mp.lk.RUnlock()

    if len(msgs) == 0 {
        return nil, nil
@ -58,13 +63,17 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type
    msgMap := make(map[address.Address]map[uint64]*types.Message)
    count := 0

    mp.lk.Lock()
    mp.lk.RLock()
    for _, m := range replace {
        mmap, ok := msgMap[m.From]
        if !ok {
            mmap = make(map[uint64]*types.Message)
            msgMap[m.From] = mmap
            mset, ok := mp.pending[m.From]
            mset, ok, err := mp.getPendingMset(ctx, m.From)
            if err != nil {
                log.Warnf("errored while getting pending mset: %w", err)
                return nil, err
            }
            if ok {
                count += len(mset.msgs)
                for _, sm := range mset.msgs {
@ -76,7 +85,7 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type
        }
        mmap[m.Nonce] = m
    }
    mp.lk.Unlock()
    mp.lk.RUnlock()

    msgs := make([]*types.Message, 0, count)
    start := 0
@ -103,9 +112,9 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
    if mp.api.IsLite() {
        return nil, nil
    }
    mp.curTsLk.Lock()
    mp.curTsLk.RLock()
    curTs := mp.curTs
    mp.curTsLk.Unlock()
    mp.curTsLk.RUnlock()

    epoch := curTs.Height() + 1

@ -143,22 +152,26 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,

        st, ok := state[m.From]
        if !ok {
            mp.lk.Lock()
            mset, ok := mp.pending[m.From]
            mp.lk.RLock()
            mset, ok, err := mp.getPendingMset(ctx, m.From)
            if err != nil {
                log.Warnf("errored while getting pending mset: %w", err)
                return nil, err
            }
            if ok && !interned {
                st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds}
                for _, m := range mset.msgs {
                    st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int)
                }
                state[m.From] = st
                mp.lk.Unlock()
                mp.lk.RUnlock()

                check.OK = true
                check.Hint = map[string]interface{}{
                    "nonce": st.nextNonce,
                }
            } else {
                mp.lk.Unlock()
                mp.lk.RUnlock()

                stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
                if err != nil {
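These hunks consistently downgrade exclusive locking to shared locking on read-only paths; the RWMutex that makes this legal is introduced in messagepool.go below. A generic sketch of the pattern, with the hypothetical pool type standing in for MessagePool:

// Sketch of the locking discipline these hunks move to: read-only paths take
// the shared lock and can run concurrently, while mutating paths keep the
// exclusive lock. Illustrative only; the pool type is hypothetical.
type pool struct {
    lk      sync.RWMutex
    pending map[string]int
}

func (p *pool) countFor(key string) int { // read-only path: RLock
    p.lk.RLock()
    defer p.lk.RUnlock()
    return p.pending[key]
}

func (p *pool) add(key string) { // mutating path: Lock
    p.lk.Lock()
    defer p.lk.Unlock()
    p.pending[key]++
}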
@ -118,7 +118,7 @@ func init() {
}

type MessagePool struct {
    lk sync.Mutex
    lk sync.RWMutex

    ds dtypes.MetadataDS

@ -137,9 +137,9 @@ type MessagePool struct {
    // do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively
    pending map[address.Address]*msgSet

    keyCache map[address.Address]address.Address
    keyCache *lru.Cache[address.Address, address.Address]

    curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
    curTsLk sync.RWMutex // DO NOT LOCK INSIDE lk
    curTs   *types.TipSet

    cfgLk sync.RWMutex
@ -169,13 +169,13 @@ type MessagePool struct {

    sigValCache *lru.TwoQueueCache[string, struct{}]

    nonceCache *lru.Cache[nonceCacheKey, uint64]
    stateNonceCache *lru.Cache[stateNonceCacheKey, uint64]

    evtTypes [3]journal.EventType
    journal  journal.Journal
}

type nonceCacheKey struct {
type stateNonceCacheKey struct {
    tsk  types.TipSetKey
    addr address.Address
}
@ -371,7 +371,8 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
    cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize)
    verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize)
    noncecache, _ := lru.New[nonceCacheKey, uint64](256)
    stateNonceCache, _ := lru.New[stateNonceCacheKey, uint64](32768) // 32k * ~200 bytes = 6MB
    keycache, _ := lru.New[address.Address, address.Address](1_000_000)

    cfg, err := loadConfig(ctx, ds)
    if err != nil {
@ -383,26 +384,26 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
    }

    mp := &MessagePool{
        ds:             ds,
        addSema:        make(chan struct{}, 1),
        closer:         make(chan struct{}),
        repubTk:        build.Clock.Ticker(RepublishInterval),
        repubTrigger:   make(chan struct{}, 1),
        localAddrs:     make(map[address.Address]struct{}),
        pending:        make(map[address.Address]*msgSet),
        keyCache:       make(map[address.Address]address.Address),
        minGasPrice:    types.NewInt(0),
        getNtwkVersion: us.GetNtwkVersion,
        pruneTrigger:   make(chan struct{}, 1),
        pruneCooldown:  make(chan struct{}, 1),
        blsSigCache:    cache,
        sigValCache:    verifcache,
        nonceCache:     noncecache,
        changes:        lps.New(50),
        localMsgs:      namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
        api:            api,
        netName:        netName,
        cfg:            cfg,
        ds:              ds,
        addSema:         make(chan struct{}, 1),
        closer:          make(chan struct{}),
        repubTk:         build.Clock.Ticker(RepublishInterval),
        repubTrigger:    make(chan struct{}, 1),
        localAddrs:      make(map[address.Address]struct{}),
        pending:         make(map[address.Address]*msgSet),
        keyCache:        keycache,
        minGasPrice:     types.NewInt(0),
        getNtwkVersion:  us.GetNtwkVersion,
        pruneTrigger:    make(chan struct{}, 1),
        pruneCooldown:   make(chan struct{}, 1),
        blsSigCache:     cache,
        sigValCache:     verifcache,
        stateNonceCache: stateNonceCache,
        changes:         lps.New(50),
        localMsgs:       namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
        api:             api,
        netName:         netName,
        cfg:             cfg,
        evtTypes: [...]journal.EventType{
            evtTypeMpoolAdd:    j.RegisterEventType("mpool", "add"),
            evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"),
@ -473,9 +474,18 @@ func (mp *MessagePool) TryForEachPendingMessage(f func(cid.Cid) error) error {
}

func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
    //if addr is not an ID addr, then it is already resolved to a key
    if addr.Protocol() != address.ID {
        return addr, nil
    }
    return mp.resolveToKeyFromID(ctx, addr)
}

func (mp *MessagePool) resolveToKeyFromID(ctx context.Context, addr address.Address) (address.Address, error) {

    // check the cache
    a, f := mp.keyCache[addr]
    if f {
    a, ok := mp.keyCache.Get(addr)
    if ok {
        return a, nil
    }

@ -486,9 +496,7 @@ func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (
    }

    // place both entries in the cache (may both be key addresses, which is fine)
    mp.keyCache[addr] = ka
    mp.keyCache[ka] = ka

    mp.keyCache.Add(addr, ka)
    return ka, nil
}

@ -763,7 +771,28 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
        <-mp.addSema
    }()

    mp.curTsLk.RLock()
    tmpCurTs := mp.curTs
    mp.curTsLk.RUnlock()

    //ensures computations are cached without holding the lock
    _, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
    _, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)

    mp.curTsLk.Lock()
    if tmpCurTs == mp.curTs {
        //with the lock held, mp.curTs is the same tipset we just used, so we know our computations are cached
    } else {
        //curTs has been updated, so we want to cache the new one:
        tmpCurTs = mp.curTs
        //we want to release the lock, cache the computations and then grab it again
        mp.curTsLk.Unlock()
        _, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
        _, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
        mp.curTsLk.Lock()
        //now that we have the lock we continue; we could retry in a loop, but this was added as an optimization and once is enough in practice, since the computation takes less than a block time
    }

    defer mp.curTsLk.Unlock()

    _, err = mp.addTs(ctx, m, mp.curTs, false, false)
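The new preamble in Add warms the actor and state-nonce caches without holding curTsLk and then revalidates under the lock, retrying once if the head moved in the meantime. A distilled sketch of the pattern, with expensiveComputeCached standing in for GetActorAfter/getStateNonce (illustrative, not lotus code):

// Sketch of the warm-then-revalidate pattern used by Add above.
func warmThenLock(mu *sync.RWMutex, cur *int, expensiveComputeCached func(int)) {
    mu.RLock()
    snapshot := *cur
    mu.RUnlock()

    // do the expensive work without blocking writers; results land in a cache
    expensiveComputeCached(snapshot)

    mu.Lock()
    if snapshot != *cur {
        // the head moved while we computed: warm the cache once more for the
        // new value, releasing the lock while doing so; one retry suffices in
        // practice because the computation is faster than a block time
        snapshot = *cur
        mu.Unlock()
        expensiveComputeCached(snapshot)
        mu.Lock()
    }
    defer mu.Unlock()

    // ... proceed under the lock, with the computation now cached ...
}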
@ -852,9 +881,6 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs
        return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
    }

    mp.lk.Lock()
    defer mp.lk.Unlock()

    senderAct, err := mp.api.GetActorAfter(m.Message.From, curTs)
    if err != nil {
        return false, xerrors.Errorf("failed to get sender actor: %w", err)
@ -869,6 +895,9 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs
        return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From)
    }

    mp.lk.Lock()
    defer mp.lk.Unlock()

    publish, err := mp.verifyMsgBeforeAdd(ctx, m, curTs, local)
    if err != nil {
        return false, xerrors.Errorf("verify msg failed: %w", err)
@ -1001,19 +1030,19 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
}

func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
    mp.curTsLk.Lock()
    defer mp.curTsLk.Unlock()
    mp.curTsLk.RLock()
    defer mp.curTsLk.RUnlock()

    mp.lk.Lock()
    defer mp.lk.Unlock()
    mp.lk.RLock()
    defer mp.lk.RUnlock()

    return mp.getNonceLocked(ctx, addr, mp.curTs)
}

// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling
func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) {
    mp.curTsLk.Lock()
    defer mp.curTsLk.Unlock()
    mp.curTsLk.RLock()
    defer mp.curTsLk.RUnlock()
    return mp.api.GetActorAfter(addr, mp.curTs)
}

@ -1046,24 +1075,52 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
    done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
    defer done()

    nk := nonceCacheKey{
    nk := stateNonceCacheKey{
        tsk:  ts.Key(),
        addr: addr,
    }

    n, ok := mp.nonceCache.Get(nk)
    n, ok := mp.stateNonceCache.Get(nk)
    if ok {
        return n, nil
    }

    act, err := mp.api.GetActorAfter(addr, ts)
    // get the nonce from the actor before ts
    actor, err := mp.api.GetActorBefore(addr, ts)
    if err != nil {
        return 0, err
    }
    nextNonce := actor.Nonce

    raddr, err := mp.resolveToKey(ctx, addr)
    if err != nil {
        return 0, err
    }

    mp.nonceCache.Add(nk, act.Nonce)
    // loop over all messages sent by 'addr' and find the highest nonce
    messages, err := mp.api.MessagesForTipset(ctx, ts)
    if err != nil {
        return 0, err
    }
    for _, message := range messages {
        msg := message.VMMessage()

    return act.Nonce, nil
        maddr, err := mp.resolveToKey(ctx, msg.From)
        if err != nil {
            log.Warnf("failed to resolve message from address: %s", err)
            continue
        }

        if maddr == raddr {
            if n := msg.Nonce + 1; n > nextNonce {
                nextNonce = n
            }
        }
    }

    mp.stateNonceCache.Add(nk, nextNonce)

    return nextNonce, nil
}

func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
@ -1164,11 +1221,11 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u
}

func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
    mp.curTsLk.Lock()
    defer mp.curTsLk.Unlock()
    mp.curTsLk.RLock()
    defer mp.curTsLk.RUnlock()

    mp.lk.Lock()
    defer mp.lk.Unlock()
    mp.lk.RLock()
    defer mp.lk.RUnlock()

    return mp.allPending(ctx)
}
@ -1184,11 +1241,11 @@ func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage,
}

func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) {
    mp.curTsLk.Lock()
    defer mp.curTsLk.Unlock()
    mp.curTsLk.RLock()
    defer mp.curTsLk.RUnlock()

    mp.lk.Lock()
    defer mp.lk.Unlock()
    mp.lk.RLock()
    defer mp.lk.RUnlock()
    return mp.pendingFor(ctx, a), mp.curTs
}

@ -1237,9 +1294,9 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a

    maybeRepub := func(cid cid.Cid) {
        if !repubTrigger {
            mp.lk.Lock()
            mp.lk.RLock()
            _, republished := mp.republished[cid]
            mp.lk.Unlock()
            mp.lk.RUnlock()
            if republished {
                repubTrigger = true
            }
@ -1310,9 +1367,9 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a
    }

    if len(revert) > 0 && futureDebug {
        mp.lk.Lock()
        mp.lk.RLock()
        msgs, ts := mp.allPending(ctx)
        mp.lk.Unlock()
        mp.lk.RUnlock()

        buckets := map[address.Address]*statBucket{}
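The rewritten getStateNonce above derives the next nonce from the actor state before the tipset plus the messages included in it, rather than from the post-state. The arithmetic reduces to the following sketch, where msgNonces stands in for the nonces of the resolved sender's messages in the tipset; for example, with an actor nonce of 5 before the tipset and included nonces {5, 6}, the result is 7:

// Sketch of the nonce arithmetic in getStateNonce: start from the actor's
// nonce before the tipset, then bump past any message from the same sender
// that is included in the tipset.
func nextNonce(actorNonceBefore uint64, msgNonces []uint64) uint64 {
    next := actorNonceBefore
    for _, n := range msgNonces {
        if n+1 > next {
            next = n + 1
        }
    }
    return next
}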
@ -120,6 +120,22 @@ func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
    return nil
}

func (tma *testMpoolAPI) GetActorBefore(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
    balance, ok := tma.balance[addr]
    if !ok {
        balance = types.NewInt(1000e6)
        tma.balance[addr] = balance
    }

    nonce := tma.statenonce[addr]

    return &types.Actor{
        Code:    builtin2.AccountActorCodeID,
        Nonce:   nonce,
        Balance: balance,
    }, nil
}

func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
    // regression check for load bug
    if ts == nil {
@ -2,6 +2,7 @@ package messagepool

import (
    "context"
    "errors"
    "time"

    "github.com/ipfs/go-cid"
@ -27,6 +28,7 @@ type Provider interface {
    SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
    PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error)
    PubSubPublish(string, []byte) error
    GetActorBefore(address.Address, *types.TipSet) (*types.Actor, error)
    GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
    StateDeterministicAddressAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error)
    StateNetworkVersion(context.Context, abi.ChainEpoch) network.Version
@ -58,6 +60,23 @@ func (mpp *mpoolProvider) IsLite() bool {
    return mpp.lite != nil
}

func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
    if !mpp.IsLite() {
        return nil, errors.New("should not use getActorLite on non lite Provider")
    }

    n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
    if err != nil {
        return nil, xerrors.Errorf("getting nonce over lite: %w", err)
    }
    a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key())
    if err != nil {
        return nil, xerrors.Errorf("getting actor over lite: %w", err)
    }
    a.Nonce = n
    return a, nil
}

func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
    mpp.sm.ChainStore().SubscribeHeadChanges(
        store.WrapHeadChangeCoalescer(
@ -77,18 +96,17 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
    return mpp.ps.Publish(k, v) // nolint
}

func (mpp *mpoolProvider) GetActorBefore(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
    if mpp.IsLite() {
        return mpp.getActorLite(addr, ts)
    }

    return mpp.sm.LoadActor(context.TODO(), addr, ts)
}

func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
    if mpp.IsLite() {
        n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
        if err != nil {
            return nil, xerrors.Errorf("getting nonce over lite: %w", err)
        }
        a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key())
        if err != nil {
            return nil, xerrors.Errorf("getting actor over lite: %w", err)
        }
        a.Nonce = n
        return a, nil
        return mpp.getActorLite(addr, ts)
    }

    stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)
@ -20,19 +20,23 @@ const repubMsgLimit = 30
var RepublishBatchDelay = 100 * time.Millisecond

func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
    mp.curTsLk.Lock()
    mp.curTsLk.RLock()
    ts := mp.curTs
    mp.curTsLk.RUnlock()

    baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
    if err != nil {
        mp.curTsLk.Unlock()
        return xerrors.Errorf("computing basefee: %w", err)
    }
    baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)

    pending := make(map[address.Address]map[uint64]*types.SignedMessage)

    mp.lk.Lock()
    mp.republished = nil // clear this to avoid races triggering an early republish
    mp.lk.Unlock()

    mp.lk.RLock()
    mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
        mset, ok, err := mp.getPendingMset(ctx, actor)
        if err != nil {
@ -53,9 +57,7 @@ func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
        }
        pending[actor] = pend
    })

    mp.lk.Unlock()
    mp.curTsLk.Unlock()
    mp.lk.RUnlock()

    if len(pending) == 0 {
        return nil
@ -176,8 +178,8 @@ loop:
        republished[m.Cid()] = struct{}{}
    }

    mp.lk.Lock()
    // update the republished set so that we can trigger early republish from head changes
    mp.lk.Lock()
    mp.republished = republished
    mp.lk.Unlock()
@ -40,11 +40,21 @@ type msgChain struct {
}

func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
    mp.curTsLk.Lock()
    defer mp.curTsLk.Unlock()
    mp.curTsLk.RLock()
    defer mp.curTsLk.RUnlock()

    mp.lk.Lock()
    defer mp.lk.Unlock()
    mp.lk.RLock()
    defer mp.lk.RUnlock()

    // See if we need to prune before selection; excessive buildup can lead to slow selection,
    // so prune if we have too many messages (ignoring the cooldown).
    mpCfg := mp.getConfig()
    if mp.currentSize > mpCfg.SizeLimitHigh {
        log.Infof("too many messages; pruning before selection")
        if err := mp.pruneMessages(ctx, ts); err != nil {
            log.Warnf("error pruning excess messages: %s", err)
        }
    }

    // if the ticket quality is high enough that the first block has higher probability
    // than any other block, then we don't bother with optimal selection because the
@ -128,10 +128,43 @@ func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types
}

func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
    tsKey := ts.Key()

    // check if we have the trace for this tipset in the cache
    sm.execTraceCacheLock.Lock()
    if entry, ok := sm.execTraceCache.Get(tsKey); ok {
        // we have to make a deep copy since caller can modify the invocTrace
        // and we don't want that to change what we store in cache
        invocTraceCopy := makeDeepCopy(entry.invocTrace)
        sm.execTraceCacheLock.Unlock()
        return entry.postStateRoot, invocTraceCopy, nil
    }
    sm.execTraceCacheLock.Unlock()

    var invocTrace []*api.InvocResult
    st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
    if err != nil {
        return cid.Undef, nil, err
    }

    invocTraceCopy := makeDeepCopy(invocTrace)

    sm.execTraceCacheLock.Lock()
    sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy})
    sm.execTraceCacheLock.Unlock()

    return st, invocTrace, nil
}

func makeDeepCopy(invocTrace []*api.InvocResult) []*api.InvocResult {
    c := make([]*api.InvocResult, len(invocTrace))
    for i, ir := range invocTrace {
        if ir == nil {
            continue
        }
        tmp := *ir
        c[i] = &tmp
    }

    return c
}
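The copying above guards the cache against caller mutation: handing out the cached slice directly would let a caller rewrite cached entries in place. Note that makeDeepCopy duplicates each InvocResult value, so any pointers nested inside those structs still alias; the sketch below is illustrative of the hazard the copy avoids:

// Sketch: why ExecutionTrace copies before caching and returning. If the
// cached slice were returned directly, a caller mutation would poison the
// cache. Illustrative only.
func demoAliasing() {
    cached := []*int{new(int)}
    leaked := cached // no copy: shares the backing array and the pointers in it
    *leaked[0] = 42  // silently rewrites the "cached" entry
    _ = cached       // cached[0] now reads 42, which is the hazard the copy avoids
}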
@ -36,6 +36,7 @@ import (
    "github.com/filecoin-project/lotus/chain/consensus"
    "github.com/filecoin-project/lotus/chain/consensus/filcns"
    "github.com/filecoin-project/lotus/chain/gen"
    "github.com/filecoin-project/lotus/chain/index"
    "github.com/filecoin-project/lotus/chain/stmgr"
    . "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/types"
@ -168,7 +169,7 @@ func TestForkHeightTriggers(t *testing.T) {
    }

    return st.Flush(ctx)
    }}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
    }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
    if err != nil {
        t.Fatal(err)
    }
@ -286,7 +287,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
    root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
    migrationCount++
    return root, nil
    }}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
    }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
    if err != nil {
        t.Fatal(err)
    }
@ -504,7 +505,7 @@ func TestForkPreMigration(t *testing.T) {
        return nil
    },
    }}},
    }, cg.BeaconSchedule(), datastore.NewMapDatastore())
    }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
    if err != nil {
        t.Fatal(err)
    }
@ -579,6 +580,7 @@ func TestDisablePreMigration(t *testing.T) {
        },
        cg.BeaconSchedule(),
        datastore.NewMapDatastore(),
        index.DummyMsgIndex,
    )
    require.NoError(t, err)
    require.NoError(t, sm.Start(context.Background()))
@ -633,6 +635,7 @@ func TestMigrtionCache(t *testing.T) {
        },
        cg.BeaconSchedule(),
        metadataDs,
        index.DummyMsgIndex,
    )
    require.NoError(t, err)
    require.NoError(t, sm.Start(context.Background()))
@ -685,6 +688,7 @@ func TestMigrtionCache(t *testing.T) {
        },
        cg.BeaconSchedule(),
        metadataDs,
        index.DummyMsgIndex,
    )
    require.NoError(t, err)
    sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
@ -10,6 +10,7 @@ import (

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/chain/index"
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
)
@ -18,6 +19,7 @@ import (
// happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on
// chain for at least confidence epochs without being reverted before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
    // TODO use the index to speed this up.
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

@ -55,10 +57,15 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
    var backFm cid.Cid
    backSearchWait := make(chan struct{})
    go func() {
        fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
        if err != nil {
            log.Warnf("failed to look back through chain for message: %v", err)
            return
        fts, r, foundMsg, err := sm.searchForIndexedMsg(ctx, mcid, msg)

        found := (err == nil && r != nil && foundMsg.Defined())
        if !found {
            fts, r, foundMsg, err = sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
            if err != nil {
                log.Warnf("failed to look back through chain for message: %v", err)
                return
            }
        }

        backTs = fts
@ -145,7 +152,30 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
        return head, r, foundMsg, nil
    }

    fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
    fts, r, foundMsg, err := sm.searchForIndexedMsg(ctx, mcid, msg)

    switch {
    case err == nil:
        if r != nil && foundMsg.Defined() {
            return fts, r, foundMsg, nil
        }

        // debug log this, it's noteworthy
        if r == nil {
            log.Debugf("missing receipt for message in index for %s", mcid)
        }
        if !foundMsg.Defined() {
            log.Debugf("message %s not found", mcid)
        }

    case errors.Is(err, index.ErrNotFound):
        // ok for the index to have incomplete data

    default:
        log.Warnf("error searching message index: %s", err)
    }

    fts, r, foundMsg, err = sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)

    if err != nil {
        log.Warnf("failed to look back through chain for message %s", mcid)
@ -159,6 +189,44 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
    return fts, r, foundMsg, nil
}

func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
    minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid)
    if err != nil {
        return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err)
    }

    // check the height against the current tipset; minimum execution confidence requires that the
    // inclusion tipset height is lower than the current head + 1
    curTs := sm.cs.GetHeaviestTipSet()
    if curTs.Height() <= minfo.Epoch+1 {
        return nil, nil, cid.Undef, xerrors.Errorf("indexed message does not appear before the current tipset; index epoch: %d, current epoch: %d", minfo.Epoch, curTs.Height())
    }

    // now get the execution tipset
    // TODO optimization: the index should have it implicitly so we can return it in the msginfo.
    xts, err := sm.cs.GetTipsetByHeight(ctx, minfo.Epoch+1, curTs, false)
    if err != nil {
        return nil, nil, cid.Undef, xerrors.Errorf("error looking up execution tipset: %w", err)
    }

    // check that the parent of the execution index is indeed the inclusion tipset
    parent := xts.Parents()
    parentCid, err := parent.Cid()
    if err != nil {
        return nil, nil, cid.Undef, xerrors.Errorf("error computing tipset cid: %w", err)
    }

    if !parentCid.Equals(minfo.TipSet) {
        return nil, nil, cid.Undef, xerrors.Errorf("inclusion tipset mismatch: have %s, expected %s", parentCid, minfo.TipSet)
    }

    r, foundMsg, err := sm.tipsetExecutedMessage(ctx, xts, mcid, m.VMMessage(), false)
    if err != nil {
        return nil, nil, cid.Undef, xerrors.Errorf("error in tipstExecutedMessage: %w", err)
    }
    return xts, r, foundMsg, nil
}

// searchBackForMsg searches up to limit tipsets backwards from the given
// tipset for a message receipt.
// If limit is
@ -5,6 +5,7 @@ import (
    "fmt"
    "sync"

    lru "github.com/hashicorp/golang-lru/v2"
    "github.com/ipfs/go-cid"
    dstore "github.com/ipfs/go-datastore"
    cbor "github.com/ipfs/go-ipld-cbor"
@ -25,6 +26,7 @@ import (
    "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/chain/beacon"
    "github.com/filecoin-project/lotus/chain/index"
    "github.com/filecoin-project/lotus/chain/rand"
    "github.com/filecoin-project/lotus/chain/state"
    "github.com/filecoin-project/lotus/chain/store"
@ -38,6 +40,8 @@ import (
const LookbackNoLimit = api.LookbackNoLimit
const ReceiptAmtBitwidth = 3

const execTraceCacheSize = 16

var log = logging.Logger("statemgr")

type StateManagerAPI interface {
@ -135,6 +139,15 @@ type StateManager struct {
    tsExec        Executor
    tsExecMonitor ExecMonitor
    beacon        beacon.Schedule

    msgIndex index.MsgIndex

    // We keep a small cache for calls to ExecutionTrace which helps improve
    // performance for node operators like exchanges and block explorers
    execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
    // We need a lock while making the copy to prevent other callers
    // from overwriting the cache while the copy is being made
    execTraceCacheLock sync.Mutex
}

// Caches a single state tree
@ -143,7 +156,12 @@ type treeCache struct {
    tree *state.StateTree
}

func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
type tipSetCacheEntry struct {
    postStateRoot cid.Cid
    invocTrace    []*api.InvocResult
}

func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
    // If we have upgrades, make sure they're in-order and make sense.
    if err := us.Validate(); err != nil {
        return nil, err
@ -182,6 +200,11 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
        }
    }

    execTraceCache, err := lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
    if err != nil {
        return nil, err
    }

    return &StateManager{
        networkVersions: networkVersions,
        latestVersion:   lastVersion,
@ -197,12 +220,14 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
            root: cid.Undef,
            tree: nil,
        },
        compWait: make(map[string]chan struct{}),
        compWait:       make(map[string]chan struct{}),
        msgIndex:       msgIndex,
        execTraceCache: execTraceCache,
    }, nil
}

func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
    sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
    sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex)
    if err != nil {
        return nil, err
    }
@ -2,18 +2,21 @@ package store

import (
"context"
"hash/maphash"
"os"
"strconv"
"sync"

"github.com/puzpuzpuz/xsync/v2"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/shardedmutex"
)

var DefaultChainIndexCacheSize = 32 << 15
// DefaultChainIndexCacheSize no longer sets the maximum size, just the initial size of the map.
var DefaultChainIndexCacheSize = 1 << 15

func init() {
if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" {
@ -27,8 +30,9 @@ func init() {
}

type ChainIndex struct {
indexCacheLk sync.Mutex
indexCache map[types.TipSetKey]*lbEntry
indexCache *xsync.MapOf[types.TipSetKey, *lbEntry]

fillCacheLock shardedmutex.ShardedMutexFor[types.TipSetKey]

loadTipSet loadTipSetFunc

@ -36,11 +40,16 @@ type ChainIndex struct {
}
type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error)

func maphashTSK(s maphash.Seed, tsk types.TipSetKey) uint64 {
return maphash.Bytes(s, tsk.Bytes())
}

func NewChainIndex(lts loadTipSetFunc) *ChainIndex {
return &ChainIndex{
indexCache: make(map[types.TipSetKey]*lbEntry, DefaultChainIndexCacheSize),
loadTipSet: lts,
skipLength: 20,
indexCache: xsync.NewTypedMapOfPresized[types.TipSetKey, *lbEntry](maphashTSK, DefaultChainIndexCacheSize),
fillCacheLock: shardedmutex.NewFor(maphashTSK, 32),
loadTipSet: lts,
skipLength: 20,
}
}

@ -59,17 +68,23 @@ func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet,
return nil, xerrors.Errorf("failed to round down: %w", err)
}

ci.indexCacheLk.Lock()
defer ci.indexCacheLk.Unlock()
cur := rounded.Key()
for {
lbe, ok := ci.indexCache[cur]
lbe, ok := ci.indexCache.Load(cur) // check the cache
if !ok {
fc, err := ci.fillCache(ctx, cur)
if err != nil {
return nil, xerrors.Errorf("failed to fill cache: %w", err)
lk := ci.fillCacheLock.GetLock(cur)
lk.Lock() // if entry is missing, take the lock
lbe, ok = ci.indexCache.Load(cur) // check if someone else added it while we waited for lock
if !ok {
fc, err := ci.fillCache(ctx, cur)
if err != nil {
lk.Unlock()
return nil, xerrors.Errorf("failed to fill cache: %w", err)
}
lbe = fc
ci.indexCache.Store(cur, lbe)
}
lbe = fc
lk.Unlock()
}

if to == lbe.targetHeight {
@ -137,7 +152,6 @@ func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEn
targetHeight: skipTarget.Height(),
target: skipTarget.Key(),
}
ci.indexCache[tsk] = lbe

return lbe, nil
}
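The rewritten GetTipsetByHeight replaces one global mutex with a per-key (sharded) lock plus a second cache lookup, so concurrent callers for the same key perform the expensive fill only once while callers for different keys no longer serialize. A minimal sketch of that double-checked pattern over a plain sync.Map (the real code uses the external xsync.MapOf and shardedmutex packages):

package main

import (
	"fmt"
	"hash/maphash"
	"sync"
)

type shardedCache struct {
	m     sync.Map // key -> value
	locks [32]sync.Mutex
	seed  maphash.Seed
}

// lockFor picks a shard lock deterministically from the key's hash.
func (c *shardedCache) lockFor(key string) *sync.Mutex {
	h := maphash.String(c.seed, key)
	return &c.locks[h%uint64(len(c.locks))]
}

// getOrFill returns the cached value, computing it at most once per key
// even under concurrent access to the same key.
func (c *shardedCache) getOrFill(key string, fill func() string) string {
	if v, ok := c.m.Load(key); ok { // fast path: lock-free read
		return v.(string)
	}
	lk := c.lockFor(key)
	lk.Lock()
	defer lk.Unlock()
	if v, ok := c.m.Load(key); ok { // re-check: another goroutine may have filled it
		return v.(string)
	}
	v := fill() // expensive computation, done once
	c.m.Store(key, v)
	return v
}

func main() {
	c := &shardedCache{seed: maphash.MakeSeed()}
	fmt.Println(c.getOrFill("k", func() string { return "v" }))
	fmt.Println(c.getOrFill("k", func() string { return "never runs" }))
}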
|
@ -3,10 +3,10 @@ package store
import (
"context"

block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

@ -114,12 +114,35 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
return nil, xerrors.Errorf("failed to load state tree at tipset %s: %w", ts, err)
}

useIds := false
selectMsg := func(m *types.Message) (bool, error) {
var sender address.Address
if ts.Height() >= build.UpgradeHyperdriveHeight {
sender, err = st.LookupID(m.From)
if err != nil {
return false, err
if useIds {
sender, err = st.LookupID(m.From)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
} else {
if m.From.Protocol() != address.ID {
// we haven't been told to use IDs, just use the robust addr
sender = m.From
} else {
// uh-oh, we actually have an ID-sender!
useIds = true
for robust, nonce := range applied {
resolved, err := st.LookupID(robust)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
applied[resolved] = nonce
}

sender, err = st.LookupID(m.From)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
}
}
} else {
sender = m.From
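The useIds flag implements a lazy upgrade: senders are keyed by their robust address until the first ID-keyed sender appears, at which point every key accumulated so far is re-resolved so the whole applied map uses one canonical key space. A toy version of that rekey-on-demand move, with a hypothetical resolve function standing in for st.LookupID:

package main

import "fmt"

// rekey re-resolves every existing key through resolve, keeping values.
// It is called once, the first time a canonical (ID) key shows up.
// Keys are collected first so new entries are not re-resolved mid-range.
func rekey(applied map[string]uint64, resolve func(string) string) {
	resolved := make(map[string]uint64, len(applied))
	for k, nonce := range applied {
		resolved[resolve(k)] = nonce
	}
	for k, v := range resolved {
		applied[k] = v
	}
}

func main() {
	resolve := func(robust string) string { return "id:" + robust }
	applied := map[string]uint64{"f1abc": 4, "f1def": 9}

	useIds := false
	sender := "id:f1abc" // first ID-form sender observed
	if !useIds {
		useIds = true
		rekey(applied, resolve)
	}
	fmt.Println(applied[sender], useIds) // 4 true
}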
|
@ -3,13 +3,15 @@ package store
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"sync"
"time"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
format "github.com/ipfs/go-ipld-format"
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
carv2 "github.com/ipld/go-car/v2"
@ -121,11 +123,9 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
}

ts := root
tssToPersist := make([]*types.TipSet, 0, TipsetkeyBackfillRange)
for i := 0; i < int(TipsetkeyBackfillRange); i++ {
err = cs.PersistTipset(ctx, ts)
if err != nil {
return nil, err
}
tssToPersist = append(tssToPersist, ts)
parentTsKey := ts.Parents()
ts, err = cs.LoadTipSet(ctx, parentTsKey)
if ts == nil || err != nil {
@ -134,6 +134,10 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
}
}

if err := cs.PersistTipsets(ctx, tssToPersist); err != nil {
return nil, xerrors.Errorf("failed to persist tipsets: %w", err)
}

return root, nil
}

@ -167,8 +171,11 @@ func (t walkSchedTaskType) String() string {
}

type walkTask struct {
c cid.Cid
taskType walkSchedTaskType
c cid.Cid
taskType walkSchedTaskType
topLevelTaskType walkSchedTaskType
blockCid cid.Cid
epoch abi.ChainEpoch
}

// an ever growing FIFO
@ -317,8 +324,11 @@ func newWalkScheduler(ctx context.Context, store bstore.Blockstore, cfg walkSche
cancel() // kill workers
return nil, ctx.Err()
case s.workerTasks.in <- walkTask{
c: b.Cid(),
taskType: blockTask,
c: b.Cid(),
taskType: blockTask,
topLevelTaskType: blockTask,
blockCid: b.Cid(),
epoch: cfg.head.Height(),
}:
}
}
@ -363,6 +373,9 @@ func (s *walkScheduler) enqueueIfNew(task walkTask) {
//log.Infow("ignored", "cid", todo.c.String())
return
}

// This lets through RAW and CBOR blocks, the only two types that we
// end up writing to the exported CAR.
if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
//log.Infow("ignored", "cid", todo.c.String())
return
@ -416,8 +429,17 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
}

blk, err := s.store.Get(s.ctx, t.c)
if errors.Is(err, format.ErrNotFound{}) && t.topLevelTaskType == receiptTask {
log.Debugw("ignoring not-found block in Receipts",
"block", t.blockCid,
"epoch", t.epoch,
"cid", t.c)
return nil
}
if err != nil {
return xerrors.Errorf("writing object to car, bs.Get: %w", err)
return xerrors.Errorf(
"blockstore.Get(%s). Task: %s. Block: %s (%s). Epoch: %d. Err: %w",
t.c, t.taskType, t.topLevelTaskType, t.blockCid, t.epoch, err)
}

s.results <- taskResult{
@ -425,15 +447,19 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
b: blk,
}

// We exported the ipld block. If it wasn't a CBOR block, there's nothing
// else to do and we can bail out early as it won't have any links
// etc.
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
return nil
}

rawData := blk.RawData()

// extract relevant dags to walk from the block
if t.taskType == blockTask {
blk := t.c
data, err := s.store.Get(s.ctx, blk)
if err != nil {
return err
}
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
if err := b.UnmarshalCBOR(bytes.NewBuffer(rawData)); err != nil {
return xerrors.Errorf("unmarshalling block header (cid=%s): %w", blk, err)
}
if b.Height%1_000 == 0 {
@ -443,13 +469,19 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
log.Info("exporting genesis block")
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: dagTask,
c: b.Parents[i],
taskType: dagTask,
topLevelTaskType: blockTask,
blockCid: b.Parents[i],
epoch: 0,
})
}
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
c: b.ParentStateRoot,
taskType: stateTask,
topLevelTaskType: stateTask,
blockCid: t.c,
epoch: 0,
})

return s.sendFinish(workerN)
@ -457,33 +489,45 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
// enqueue block parents
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: blockTask,
c: b.Parents[i],
taskType: blockTask,
topLevelTaskType: blockTask,
blockCid: b.Parents[i],
epoch: b.Height,
})
}
if s.cfg.tail.Height() >= b.Height {
log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", blk.String())
log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", t.c.String())
return nil
}

if s.cfg.includeMessages {
// enqueue block messages
s.enqueueIfNew(walkTask{
c: b.Messages,
taskType: messageTask,
c: b.Messages,
taskType: messageTask,
topLevelTaskType: messageTask,
blockCid: t.c,
epoch: b.Height,
})
}
if s.cfg.includeReceipts {
// enqueue block receipts
s.enqueueIfNew(walkTask{
c: b.ParentMessageReceipts,
taskType: receiptTask,
c: b.ParentMessageReceipts,
taskType: receiptTask,
topLevelTaskType: receiptTask,
blockCid: t.c,
epoch: b.Height,
})
}
if s.cfg.includeState {
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
c: b.ParentStateRoot,
taskType: stateTask,
topLevelTaskType: stateTask,
blockCid: t.c,
epoch: b.Height,
})
}

@ -491,16 +535,22 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
}

// Not a chain-block: we scan for CIDs in the raw block-data
return cbg.ScanForLinks(bytes.NewReader(blk.RawData()), func(c cid.Cid) {
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
return
}

err = cbg.ScanForLinks(bytes.NewReader(rawData), func(c cid.Cid) {
s.enqueueIfNew(walkTask{
c: c,
taskType: dagTask,
c: c,
taskType: dagTask,
topLevelTaskType: t.topLevelTaskType,
blockCid: t.blockCid,
epoch: t.epoch,
})
})

if err != nil {
return xerrors.Errorf(
"ScanForLinks(%s). Task: %s. Block: %s (%s). Epoch: %d. Err: %w",
t.c, t.taskType, t.topLevelTaskType, t.blockCid, t.epoch, err)
}
return nil
}

func (cs *ChainStore) ExportRange(
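Two things changed in processTask: every task now carries provenance (topLevelTaskType, blockCid, epoch) so a failure can say which chain object the missing CID hangs off, and a not-found block is now tolerated, but only under the receipts subtree, which may legitimately be pruned. A small sketch of matching a typed not-found error with errors.Is and enriching everything else (hypothetical types, mimicking how go-ipld-format's ErrNotFound implements Is):

package main

import (
	"errors"
	"fmt"
)

// notFoundError mimics a typed not-found error such as
// go-ipld-format's ErrNotFound.
type notFoundError struct{ cid string }

func (e notFoundError) Error() string { return "not found: " + e.cid }

// Is lets errors.Is match any notFoundError regardless of payload.
func (e notFoundError) Is(target error) bool {
	_, ok := target.(notFoundError)
	return ok
}

func get(cid string) ([]byte, error) {
	return nil, notFoundError{cid}
}

func process(cid, topLevel string, epoch int64) error {
	data, err := get(cid)
	if errors.Is(err, notFoundError{}) && topLevel == "receipts" {
		// receipts are prunable; a hole here is expected, skip it
		return nil
	}
	if err != nil {
		// provenance makes the failure actionable
		return fmt.Errorf("get(%s) under %s at epoch %d: %w", cid, topLevel, epoch, err)
	}
	_ = data
	return nil
}

func main() {
	fmt.Println(process("bafy1", "receipts", 100)) // <nil>
	fmt.Println(process("bafy2", "state", 100))    // wrapped error
}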
|
@ -12,11 +12,11 @@ import (
"time"

lru "github.com/hashicorp/golang-lru/v2"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
block "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
@ -378,7 +378,7 @@ func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) erro
}

func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
if err := cs.PersistTipset(ctx, ts); err != nil {
if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
return xerrors.Errorf("failed to persist tipset: %w", err)
}

@ -639,22 +639,10 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()

if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}

span.AddAttributes(trace.BoolAttribute("newHead", true))

log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
prevHeaviest := cs.heaviest
cs.heaviest = ts

if err := cs.writeHead(ctx, ts); err != nil {
@ -662,6 +650,18 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
return err
}

if prevHeaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: prevHeaviest,
new: ts,
}
} else {
log.Warnf("no previous heaviest tipset found, using %s", ts.Cids())
}

return nil
}

@ -970,18 +970,25 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
return nil
}

func (cs *ChainStore) PersistTipset(ctx context.Context, ts *types.TipSet) error {
if err := cs.persistBlockHeaders(ctx, ts.Blocks()...); err != nil {
func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
tsBlks := make([]block.Block, 0, len(tipsets))
for _, ts := range tipsets {
toPersist = append(toPersist, ts.Blocks()...)
tsBlk, err := ts.Key().ToStorageBlock()
if err != nil {
return xerrors.Errorf("failed to get tipset key block: %w", err)
}

tsBlks = append(tsBlks, tsBlk)
}

if err := cs.persistBlockHeaders(ctx, toPersist...); err != nil {
return xerrors.Errorf("failed to persist block headers: %w", err)
}

tsBlk, err := ts.Key().ToStorageBlock()
if err != nil {
return xerrors.Errorf("failed to get tipset key block: %w", err)
}

if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
return xerrors.Errorf("failed to put tipset key block: %w", err)
if err := cs.chainLocalBlockstore.PutMany(ctx, tsBlks); err != nil {
return xerrors.Errorf("failed to put tipset key blocks: %w", err)
}

return nil
@ -1149,6 +1156,10 @@ func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*Ful
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
if h < 0 {
return nil, xerrors.Errorf("height %d is negative", h)
}

if ts == nil {
ts = cs.GetHeaviestTipSet()
}
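PersistTipsets replaces N separate PersistTipset calls during import and sync: block headers for all tipsets are gathered into one slice, and the tipset-key blocks go through a single PutMany, so the blockstore sees one batched write instead of one per tipset. The shape in miniature, with a hypothetical in-memory store standing in for the lotus blockstore:

package main

import "fmt"

// memBS counts write calls; PutMany is one round-trip regardless of size.
type memBS struct{ writes int }

func (m *memBS) Put(b string) error        { m.writes++; return nil }
func (m *memBS) PutMany(bs []string) error { m.writes++; return nil }

// persistTipsets gathers every tipset's blocks first, then issues a
// single batched write, instead of one write per tipset.
func persistTipsets(bs *memBS, tipsets [][]string) error {
	batch := make([]string, 0, len(tipsets))
	for _, ts := range tipsets {
		batch = append(batch, ts...)
	}
	return bs.PutMany(batch)
}

func main() {
	bs := &memBS{}
	_ = persistTipsets(bs, [][]string{{"h1", "h2"}, {"h3"}, {"h4", "h5"}})
	fmt.Println("writes:", bs.writes) // 1 instead of 3
}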
|
@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -214,7 +215,7 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain")
}

sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
|
@ -8,9 +8,9 @@ import (
"time"

lru "github.com/hashicorp/golang-lru/v2"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
@ -7,8 +7,8 @@ import (
"testing"

"github.com/golang/mock/gomock"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
@ -11,10 +11,10 @@ import (

"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/peer"
@ -208,8 +208,8 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return false
}

if syncer.consensus.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
if !syncer.consensus.IsEpochInConsensusRange(fts.TipSet().Height()) {
log.Infof("received block outside of consensus range at height %d", fts.TipSet().Height())
return false
}

@ -228,7 +228,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {

// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistTipset(ctx, fts.TipSet()); err != nil {
if err := syncer.store.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
log.Warn("failed to persist incoming block header: ", err)
return false
}
@ -1193,17 +1193,16 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t
span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers))))

if !headers[0].Equals(ts) {
log.Errorf("collectChain headers[0] should be equal to sync target. It's not: %s != %s", headers[0].Cids(), ts.Cids())
return xerrors.Errorf("collectChain synced %s, wanted to sync %s", headers[0].Cids(), ts.Cids())
}

ss.SetStage(api.StagePersistHeaders)

for _, ts := range headers {
if err := syncer.store.PersistTipset(ctx, ts); err != nil {
err = xerrors.Errorf("failed to persist synced tipset to the chainstore: %w", err)
ss.Error(err)
return err
}
// Write tipsets from oldest to newest.
if err := syncer.store.PersistTipsets(ctx, headers); err != nil {
err = xerrors.Errorf("failed to persist synced tipset to the chainstore: %w", err)
ss.Error(err)
return err
}

ss.SetStage(api.StageMessages)
|
@ -4,8 +4,8 @@ import (
"bytes"
"math/big"

block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/minio/blake2b-simd"
"golang.org/x/xerrors"

@ -13,8 +13,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/proof"

"github.com/filecoin-project/lotus/build"
)

type Ticket struct {
@ -195,36 +193,6 @@ func CidArrsContains(a []cid.Cid, b cid.Cid) bool {
return false
}

var blocksPerEpoch = NewInt(build.BlocksPerEpoch)

const sha256bits = 256

func IsTicketWinner(vrfTicket []byte, mypow BigInt, totpow BigInt) bool {
/*
Need to check that
(h(vrfout) + 1) / (max(h) + 1) <= e * myPower / totalPower
max(h) == 2^256-1
which in terms of integer math means:
(h(vrfout) + 1) * totalPower <= e * myPower * 2^256
in 2^256 space, it is equivalent to:
h(vrfout) * totalPower < e * myPower * 2^256

*/

h := blake2b.Sum256(vrfTicket)

lhs := BigFromBytes(h[:]).Int
lhs = lhs.Mul(lhs, totpow.Int)

// rhs = sectorSize * 2^256
// rhs = sectorSize << 256
rhs := new(big.Int).Lsh(mypow.Int, sha256bits)
rhs = rhs.Mul(rhs, blocksPerEpoch.Int)

// h(vrfout) * totalPower < e * sectorSize * 2^256?
return lhs.Cmp(rhs) < 0
}

func (t *Ticket) Equals(ot *Ticket) bool {
return bytes.Equal(t.VRFProof, ot.VRFProof)
}
|
@ -100,6 +100,7 @@ func polyval(p []*big.Int, x *big.Int) *big.Int {

// computes lambda in Q.256
func lambda(power, totalPower *big.Int) *big.Int {
blocksPerEpoch := NewInt(build.BlocksPerEpoch)
lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0
lam = lam.Lsh(lam, precision) // Q.256
lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256
@ -5,8 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
block "github.com/ipfs/go-libipfs/blocks"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -220,12 +220,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
|
||||
}
|
||||
|
||||
// EffectiveGasPremium returns the effective gas premium claimable by the miner
|
||||
// given the supplied base fee.
|
||||
// given the supplied base fee. This method is not used anywhere except the Eth API.
|
||||
//
|
||||
// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
|
||||
// specified premium.
|
||||
// specified premium. Returns 0 if GasFeeCap is less than BaseFee.
|
||||
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
|
||||
available := big.Sub(m.GasFeeCap, baseFee)
|
||||
// It's possible that storage providers may include messages with gasFeeCap less than the baseFee
|
||||
// In such cases, their reward should be viewed as zero
|
||||
if available.LessThan(big.NewInt(0)) {
|
||||
available = big.NewInt(0)
|
||||
}
|
||||
if big.Cmp(m.GasPremium, available) <= 0 {
|
||||
return m.GasPremium
|
||||
}
|
||||
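A quick numeric check of the clamping rule above, under its three regimes: the premium fits in the available headroom, the premium exceeds it, and the fee cap sits below the base fee (where the miner's reward is zero). This uses plain math/big rather than the filecoin TokenAmount type:

package main

import (
	"fmt"
	"math/big"
)

// effectiveGasPremium clamps premium to max(0, feeCap-baseFee),
// mirroring the rule described above.
func effectiveGasPremium(premium, feeCap, baseFee *big.Int) *big.Int {
	available := new(big.Int).Sub(feeCap, baseFee)
	if available.Sign() < 0 {
		available = big.NewInt(0)
	}
	if premium.Cmp(available) <= 0 {
		return premium
	}
	return available
}

func main() {
	b := big.NewInt
	fmt.Println(effectiveGasPremium(b(5), b(100), b(90)))  // 5  (fits in headroom)
	fmt.Println(effectiveGasPremium(b(50), b(100), b(90))) // 10 (clamped to cap-base)
	fmt.Println(effectiveGasPremium(b(5), b(80), b(90)))   // 0  (cap below base fee)
}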
|
@ -4,8 +4,8 @@ import (
"bytes"
"encoding/json"

block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
|
@ -7,8 +7,8 @@ import (
"io"
"strings"

block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
typegen "github.com/whyrusleeping/cbor-gen"

"github.com/filecoin-project/go-state-types/abi"
|
192
chain/vm/execution.go
Normal file
@ -0,0 +1,192 @@
package vm

import (
"context"
"os"
"strconv"
"sync"

"github.com/ipfs/go-cid"
"go.opencensus.io/stats"
"go.opencensus.io/tag"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/metrics"
)

const (
// DefaultAvailableExecutionLanes is the number of available execution lanes; it is the bound of
// concurrent active executions.
// This is the default value in filecoin-ffi
DefaultAvailableExecutionLanes = 4
// DefaultPriorityExecutionLanes is the number of reserved execution lanes for priority computations.
// This is purely userspace, but we believe it is a reasonable default, even with more available
// lanes.
DefaultPriorityExecutionLanes = 2
)

// the execution environment; see below for definition, methods, and initialization
var execution *executionEnv

// implementation of vm executor with simple sanity check preventing use after free.
type vmExecutor struct {
vmi Interface
lane ExecutionLane
}

var _ Interface = (*vmExecutor)(nil)

func newVMExecutor(vmi Interface, lane ExecutionLane) Interface {
return &vmExecutor{vmi: vmi, lane: lane}
}

func (e *vmExecutor) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
token := execution.getToken(e.lane)
defer token.Done()

return e.vmi.ApplyMessage(ctx, cmsg)
}

func (e *vmExecutor) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
token := execution.getToken(e.lane)
defer token.Done()

return e.vmi.ApplyImplicitMessage(ctx, msg)
}

func (e *vmExecutor) Flush(ctx context.Context) (cid.Cid, error) {
return e.vmi.Flush(ctx)
}

type executionToken struct {
lane ExecutionLane
reserved int
}

func (token *executionToken) Done() {
execution.putToken(token)
}

type executionEnv struct {
mx *sync.Mutex
cond *sync.Cond

// available executors
available int
// reserved executors
reserved int
}

func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
metricsUp(metrics.VMExecutionWaiting, lane)
defer metricsDown(metrics.VMExecutionWaiting, lane)

e.mx.Lock()
defer e.mx.Unlock()

switch lane {
case ExecutionLaneDefault:
for e.available <= e.reserved {
e.cond.Wait()
}

e.available--

metricsUp(metrics.VMExecutionRunning, lane)
return &executionToken{lane: lane, reserved: 0}

case ExecutionLanePriority:
for e.available == 0 {
e.cond.Wait()
}

e.available--

reserving := 0
if e.reserved > 0 {
e.reserved--
reserving = 1
}

metricsUp(metrics.VMExecutionRunning, lane)
return &executionToken{lane: lane, reserved: reserving}

default:
// already checked at interface boundary in NewVM, so this is appropriate
panic("bogus execution lane")
}
}

func (e *executionEnv) putToken(token *executionToken) {
e.mx.Lock()
defer e.mx.Unlock()

e.available++
e.reserved += token.reserved

// Note: Signal is unsound, because a priority token could wake up a non-priority
// goroutine and lead to deadlock. So Broadcast it must be.
e.cond.Broadcast()

metricsDown(metrics.VMExecutionRunning, token.lane)
}

func metricsUp(metric *stats.Int64Measure, lane ExecutionLane) {
metricsAdjust(metric, lane, 1)
}

func metricsDown(metric *stats.Int64Measure, lane ExecutionLane) {
metricsAdjust(metric, lane, -1)
}

func metricsAdjust(metric *stats.Int64Measure, lane ExecutionLane, delta int) {
laneName := "default"
if lane > ExecutionLaneDefault {
laneName = "priority"
}

ctx, _ := tag.New(
context.Background(),
tag.Upsert(metrics.ExecutionLane, laneName),
)
stats.Record(ctx, metric.M(int64(delta)))
}

func init() {
var err error

available := DefaultAvailableExecutionLanes
if concurrency := os.Getenv("LOTUS_FVM_CONCURRENCY"); concurrency != "" {
available, err = strconv.Atoi(concurrency)
if err != nil {
panic(err)
}
}

priority := DefaultPriorityExecutionLanes
if reserved := os.Getenv("LOTUS_FVM_CONCURRENCY_RESERVED"); reserved != "" {
priority, err = strconv.Atoi(reserved)
if err != nil {
panic(err)
}
}

// some sanity checks
if available < 2 {
panic("insufficient execution concurrency")
}

if available <= priority {
panic("insufficient default execution concurrency")
}

mx := &sync.Mutex{}
cond := sync.NewCond(mx)

execution = &executionEnv{
mx: mx,
cond: cond,
available: available,
reserved: priority,
}
}
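The executor above is a counting semaphore with a reservation: of available slots, reserved are only ever handed to priority callers, and a release must Broadcast because a freed slot may be usable by one class of waiter but not the other. The LOTUS_FVM_CONCURRENCY and LOTUS_FVM_CONCURRENCY_RESERVED environment variables read in init() override the two defaults. A compressed, metrics-free model of the same policy (an illustration of the scheme, not lotus code):

package main

import (
	"fmt"
	"sync"
)

type lanes struct {
	mu        sync.Mutex
	cond      *sync.Cond
	available int // free slots, including reserved ones
	reserved  int // slots only priority acquirers may take
}

func newLanes(available, reserved int) *lanes {
	l := &lanes{available: available, reserved: reserved}
	l.cond = sync.NewCond(&l.mu)
	return l
}

// acquire blocks until a slot is free for the given class and returns
// how much of the reserve it consumed (to be returned on release).
func (l *lanes) acquire(priority bool) (reserving int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if priority {
		for l.available == 0 {
			l.cond.Wait()
		}
		if l.reserved > 0 {
			l.reserved--
			reserving = 1
		}
	} else {
		for l.available <= l.reserved { // default lane cannot dip into the reserve
			l.cond.Wait()
		}
	}
	l.available--
	return reserving
}

func (l *lanes) release(reserving int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.available++
	l.reserved += reserving
	// Broadcast, not Signal: waking a single default-lane waiter when
	// only reserved capacity freed up would deadlock.
	l.cond.Broadcast()
}

func main() {
	l := newLanes(4, 2)
	r1 := l.acquire(false) // default: 2 unreserved slots exist
	r2 := l.acquire(false)
	r3 := l.acquire(true) // priority: may dip into the reserve
	fmt.Println(l.available, l.reserved) // 1 1
	l.release(r3)
	l.release(r2)
	l.release(r1)
	fmt.Println(l.available, l.reserved) // 4 2
}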
|
@ -7,9 +7,9 @@ import (
"sync/atomic"
"time"

block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
block "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
@ -250,6 +250,8 @@ type VMOpts struct {
Tracing bool
// ReturnEvents decodes and returns emitted events.
ReturnEvents bool
// ExecutionLane specifies the execution priority of the created vm
ExecutionLane ExecutionLane
}

func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
|
@ -2,6 +2,7 @@ package vm

import (
"context"
"fmt"
"os"

cid "github.com/ipfs/go-cid"
@ -17,6 +18,15 @@ var (
StatApplied uint64
)

type ExecutionLane int

const (
// ExecutionLaneDefault signifies a default, non-prioritized execution lane.
ExecutionLaneDefault ExecutionLane = iota
// ExecutionLanePriority signifies a prioritized execution lane with reserved resources.
ExecutionLanePriority
)

type Interface interface {
// Applies the given message onto the VM's current state, returning the result of the execution
ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
@ -33,7 +43,7 @@ type Interface interface {
// Message failures, unexpected terminations, gas costs, etc. should all be ignored.
var useFvmDebug = os.Getenv("LOTUS_FVM_DEVELOPER_DEBUG") == "1"

func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
func makeVM(ctx context.Context, opts *VMOpts) (Interface, error) {
if opts.NetworkVersion >= network.Version16 {
if useFvmDebug {
return NewDualExecutionFVM(ctx, opts)
@ -43,3 +53,18 @@ func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {

return NewLegacyVM(ctx, opts)
}

func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
switch opts.ExecutionLane {
case ExecutionLaneDefault, ExecutionLanePriority:
default:
return nil, fmt.Errorf("invalid execution lane: %d", opts.ExecutionLane)
}

vmi, err := makeVM(ctx, opts)
if err != nil {
return nil, err
}

return newVMExecutor(vmi, opts.ExecutionLane), nil
}
|
@ -388,7 +388,7 @@ var ChainSetHeadCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)

if cctx.NArg() != 1 {
if !cctx.Bool("genesis") && !cctx.IsSet("epoch") && cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}

|
@ -367,7 +367,7 @@ func AddrInfoFromArg(ctx context.Context, cctx *cli.Context) ([]peer.AddrInfo, e
pis = append(pis, pi)
}

return pis, err
return pis, nil
}

var NetId = &cli.Command{
@ -445,8 +445,8 @@ var NetReachability = &cli.Command{
}

fmt.Println("AutoNAT status: ", i.Reachability.String())
if i.PublicAddr != "" {
fmt.Println("Public address: ", i.PublicAddr)
if len(i.PublicAddrs) > 0 {
fmt.Println("Public address:", i.PublicAddrs)
}
return nil
},
|
11
cli/state.go
@ -1268,7 +1268,7 @@ var compStateMsg = `
{{end}}

{{if ne .MsgRct.ExitCode 0}}
<div class="error">Error: <pre>{{.Error}}</pre></div>
<div class="error">Exit: <pre>{{.MsgRct.ExitCode}}</pre></div>
{{end}}

<details>
@ -1372,7 +1372,14 @@ func isVerySlow(t time.Duration) bool {
}

func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
p, err := stmgr.GetParamType(consensus.NewActorRegistry(), code, method) // todo use api for correct actor registry
ar := consensus.NewActorRegistry()

_, found := ar.Methods[code][method]
if !found {
return fmt.Sprintf("raw:%x", params), nil
}

p, err := stmgr.GetParamType(ar, code, method) // todo use api for correct actor registry
if err != nil {
return "", err
}
|
@ -35,6 +35,7 @@ import (
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -228,7 +229,7 @@ var importBenchCmd = &cli.Command{
defer cs.Close() //nolint:errcheck

// TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs)
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex)
if err != nil {
return err
}
|
@ -325,14 +325,12 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.Full
}
}

{
fmt.Println()

ws, err := nodeApi.WorkerStats(ctx)
if err != nil {
return xerrors.Errorf("getting worker stats: %w", err)
}
fmt.Println()

ws, err := nodeApi.WorkerStats(ctx)
if err != nil {
fmt.Printf("ERROR: getting worker stats: %s\n", err)
} else {
workersByType := map[string]int{
sealtasks.WorkerSealing: 0,
sealtasks.WorkerWindowPoSt: 0,
|
@ -43,16 +43,16 @@ func main() {
backupCmd,
lcli.WithCategory("chain", actorCmd),
lcli.WithCategory("chain", infoCmd),
lcli.WithCategory("market", storageDealsCmd),
lcli.WithCategory("market", retrievalDealsCmd),
lcli.WithCategory("market", dataTransfersCmd),
lcli.WithCategory("market", dagstoreCmd),
lcli.WithCategory("market", indexProvCmd),
lcli.WithCategory("market", setHidden(storageDealsCmd)),
lcli.WithCategory("market", setHidden(retrievalDealsCmd)),
lcli.WithCategory("market", setHidden(dataTransfersCmd)),
lcli.WithCategory("market", setHidden(dagstoreCmd)),
lcli.WithCategory("market", setHidden(indexProvCmd)),
lcli.WithCategory("storage", sectorsCmd),
lcli.WithCategory("storage", provingCmd),
lcli.WithCategory("storage", storageCmd),
lcli.WithCategory("storage", sealingCmd),
lcli.WithCategory("retrieval", piecesCmd),
lcli.WithCategory("retrieval", setHidden(piecesCmd)),
}

jaeger := tracing.SetupJaegerTracing("lotus")
@ -86,6 +86,7 @@ func main() {
// adapt the Net* commands to always hit the node running the markets
// subsystem, as that is the only one that runs a libp2p node.
netCmd := *lcli.NetCmd // make a copy.
netCmd.Hidden = true
prev := netCmd.Before
netCmd.Before = func(c *cli.Context) error {
if prev != nil {
@ -137,11 +138,12 @@ func main() {
&cli.StringFlag{
Name: FlagMarketsRepo,
EnvVars: []string{"LOTUS_MARKETS_PATH"},
Usage: fmt.Sprintf("Markets repo path"),
Hidden: true,
},
&cli.BoolFlag{
Name: "call-on-markets",
Usage: "(experimental; may be removed) call this command against a markets node; use only with common commands like net, auth, pprof, etc. whose target may be ambiguous",
Name: "call-on-markets",
Usage: "(experimental; may be removed) call this command against a markets node; use only with common commands like net, auth, pprof, etc. whose target may be ambiguous",
Hidden: true,
},
cliutil.FlagVeryVerbose,
},
@ -190,3 +192,8 @@ func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Addr

return maddr, nil
}

func setHidden(cmd *cli.Command) *cli.Command {
cmd.Hidden = true
return cmd
}
|
@ -67,6 +67,7 @@ var sectorsCmd = &cli.Command{
sectorsBatching,
sectorsRefreshPieceMatchingCmd,
sectorsCompactPartitionsCmd,
sectorsUnsealCmd,
},
}

@ -1392,6 +1393,12 @@ var sectorsRemoveCmd = &cli.Command{
return xerrors.Errorf("could not parse sector number: %w", err)
}

// Check if the sector exists
_, err = minerAPI.SectorsStatus(ctx, abi.SectorNumber(id), false)
if err != nil {
return xerrors.Errorf("sectorID %d has not been created yet: %w", id, err)
}

return minerAPI.SectorRemove(ctx, abi.SectorNumber(id))
},
}
@ -2248,3 +2255,27 @@ var sectorsNumbersFreeCmd = &cli.Command{
return minerAPI.SectorNumFree(ctx, cctx.Args().First())
},
}

var sectorsUnsealCmd = &cli.Command{
Name: "unseal",
Usage: "unseal a sector",
ArgsUsage: "[sector number]",
Action: func(cctx *cli.Context) error {
minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}

sectorNum, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}

return minerAPI.SectorUnseal(ctx, abi.SectorNumber(sectorNum))
},
}
|
@ -35,6 +35,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -513,7 +514,7 @@ var chainBalanceStateCmd = &cli.Command{
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)

sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}
@ -737,7 +738,7 @@ var chainPledgeCmd = &cli.Command{
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)

sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}
383
cmd/lotus-shed/chainwatch.go
Normal file
383
cmd/lotus-shed/chainwatch.go
Normal file
@ -0,0 +1,383 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
)
|
||||
|
||||
var chainwatchCmd = &cli.Command{
|
||||
Name: "chainwatch",
|
||||
Usage: "lotus chainwatch",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "db",
|
||||
EnvVars: []string{"CHAINWATCH_DB"},
|
||||
Value: "./chainwatch.db",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
chainwatchRunCmd,
|
||||
chainwatchDotCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var chainwatchDotCmd = &cli.Command{
|
||||
Name: "dot",
|
||||
Usage: "generate dot graphs",
|
||||
ArgsUsage: "<minHeight> <toseeHeight>",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
st, err := cwOpenStorage(cctx.String("db"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minH, err := strconv.ParseInt(cctx.Args().Get(0), 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tosee, err := strconv.ParseInt(cctx.Args().Get(1), 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxH := minH + tosee
|
||||
|
||||
res, err := st.db.Query(`select block, parent, b.miner, b.height, p.height from block_parents
|
||||
inner join blocks b on block_parents.block = b.cid
|
||||
inner join blocks p on block_parents.parent = p.cid
|
||||
where b.height > ? and b.height < ?`, minH, maxH)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("digraph D {")
|
||||
|
||||
for res.Next() {
|
||||
var block, parent, miner string
|
||||
var height, ph uint64
|
||||
if err := res.Scan(&block, &parent, &miner, &height, &ph); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bc, err := cid.Parse(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
has := st.hasBlock(bc)
|
||||
|
||||
col := crc32.Checksum([]byte(miner), crc32.MakeTable(crc32.Castagnoli))&0xc0c0c0c0 + 0x30303030
|
||||
|
||||
hasstr := ""
|
||||
if !has {
|
||||
//col = 0xffffffff
|
||||
hasstr = " UNSYNCED"
|
||||
}
|
||||
|
||||
nulls := height - ph - 1
|
||||
for i := uint64(0); i < nulls; i++ {
|
||||
name := block + "NP" + fmt.Sprint(i)
|
||||
|
||||
fmt.Printf("%s [label = \"NULL:%d\", fillcolor = \"#ffddff\", style=filled, forcelabels=true]\n%s -> %s\n",
|
||||
name, height-nulls+i, name, parent)
|
||||
|
||||
parent = name
|
||||
}
|
||||
|
||||
fmt.Printf("%s [label = \"%s:%d%s\", fillcolor = \"#%06x\", style=filled, forcelabels=true]\n%s -> %s\n", block, miner, height, hasstr, col, block, parent)
|
||||
}
|
||||
if res.Err() != nil {
|
||||
return res.Err()
|
||||
}
|
||||
|
||||
fmt.Println("}")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var chainwatchRunCmd = &cli.Command{
|
||||
Name: "run",
|
||||
Usage: "Start lotus chainwatch",
|
||||
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := cliutil.GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := cliutil.ReqContext(cctx)
|
||||
|
||||
v, err := api.Version(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Remote version: %s", v.Version)
|
||||
|
||||
st, err := cwOpenStorage(cctx.String("db")) // todo flag
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer st.close() // nolint:errcheck
|
||||
|
||||
cwRunSyncer(ctx, api, st)
|
||||
go cwSubBlocks(ctx, api, st)
|
||||
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func cwSubBlocks(ctx context.Context, api api.FullNode, st *cwStorage) {
|
||||
sub, err := api.SyncIncomingBlocks(ctx)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
for bh := range sub {
|
||||
err := st.storeHeaders(map[cid.Cid]*types.BlockHeader{
|
||||
bh.Cid(): bh,
|
||||
}, false)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func cwRunSyncer(ctx context.Context, api api.FullNode, st *cwStorage) {
|
||||
notifs, err := api.ChainNotify(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
for notif := range notifs {
|
||||
for _, change := range notif {
|
||||
switch change.Type {
|
||||
case store.HCCurrent:
|
||||
fallthrough
|
||||
case store.HCApply:
|
||||
syncHead(ctx, api, st, change.Val)
|
||||
case store.HCRevert:
|
||||
log.Warnf("revert todo")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func syncHead(ctx context.Context, api api.FullNode, st *cwStorage, ts *types.TipSet) {
|
||||
log.Infof("Getting headers / actors")
|
||||
|
||||
toSync := map[cid.Cid]*types.BlockHeader{}
|
||||
toVisit := list.New()
|
||||
|
||||
for _, header := range ts.Blocks() {
|
||||
toVisit.PushBack(header)
|
||||
}
|
||||
|
||||
for toVisit.Len() > 0 {
|
||||
bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)
|
||||
|
||||
if _, seen := toSync[bh.Cid()]; seen || st.hasBlock(bh.Cid()) {
|
||||
continue
|
||||
}
|
||||
|
||||
toSync[bh.Cid()] = bh
|
||||
|
||||
if len(toSync)%500 == 10 {
|
||||
log.Infof("todo: (%d) %s", len(toSync), bh.Cid())
|
||||
}
|
||||
|
||||
if len(bh.Parents) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if bh.Height <= 530000 {
|
||||
continue
|
||||
}
|
||||
|
||||
pts, err := api.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, header := range pts.Blocks() {
|
||||
toVisit.PushBack(header)
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Syncing %d blocks", len(toSync))
|
||||
|
||||
log.Infof("Persisting headers")
|
||||
if err := st.storeHeaders(toSync, true); err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Sync done")
|
||||
}
|
||||
|
||||
type cwStorage struct {
|
||||
db *sql.DB
|
||||
|
||||
headerLk sync.Mutex
|
||||
}
|
||||
|
||||
func cwOpenStorage(dbSource string) (*cwStorage, error) {
|
||||
db, err := sql.Open("sqlite3", dbSource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
st := &cwStorage{db: db}
|
||||
|
||||
return st, st.setup()
|
||||
}
|
||||
|
||||
func (st *cwStorage) setup() error {
|
||||
tx, err := st.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = tx.Exec(`
|
||||
create table if not exists blocks
|
||||
(
|
||||
cid text not null
|
||||
constraint blocks_pk
|
||||
primary key,
|
||||
parentWeight numeric not null,
|
||||
parentStateRoot text not null,
|
||||
height int not null,
|
||||
miner text not null
|
||||
constraint blocks_id_address_map_miner_fk
|
||||
references id_address_map (address),
|
||||
timestamp int not null,
|
||||
vrfproof blob
|
||||
);
|
||||
|
||||
create unique index if not exists block_cid_uindex
|
||||
on blocks (cid);
|
||||
|
||||
create table if not exists blocks_synced
|
||||
(
|
||||
cid text not null
|
||||
constraint blocks_synced_pk
|
||||
primary key
|
||||
constraint blocks_synced_blocks_cid_fk
|
||||
references blocks,
|
||||
add_ts int not null
|
||||
);
|
||||
|
||||
create unique index if not exists blocks_synced_cid_uindex
|
||||
on blocks_synced (cid);
|
||||
|
||||
create table if not exists block_parents
|
||||
(
|
||||
block text not null
|
||||
constraint block_parents_blocks_cid_fk
|
||||
references blocks,
|
||||
parent text not null
|
||||
constraint block_parents_blocks_cid_fk_2
|
||||
references blocks
|
||||
);
|
||||
|
||||
create unique index if not exists block_parents_block_parent_uindex
|
||||
on block_parents (block, parent);
|
||||
|
||||
create unique index if not exists blocks_cid_uindex
|
||||
on blocks (cid);
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (st *cwStorage) hasBlock(bh cid.Cid) bool {
|
||||
var exitsts bool
|
||||
err := st.db.QueryRow(`select exists (select 1 FROM blocks_synced where cid=?)`, bh.String()).Scan(&exitsts)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return false
|
||||
}
|
||||
return exitsts
|
||||
}
|
||||
|
||||
func (st *cwStorage) storeHeaders(bhs map[cid.Cid]*types.BlockHeader, sync bool) error {
|
||||
st.headerLk.Lock()
|
||||
defer st.headerLk.Unlock()
|
||||
|
||||
tx, err := st.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stmt, err := tx.Prepare(`insert into blocks (cid, parentWeight, parentStateRoot, height, miner, "timestamp", vrfproof) values (?, ?, ?, ?, ?, ?, ?) on conflict do nothing`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer stmt.Close() // nolint:errcheck
|
||||
for _, bh := range bhs {
|
||||
if _, err := stmt.Exec(bh.Cid().String(),
|
||||
bh.ParentWeight.String(),
|
||||
bh.ParentStateRoot.String(),
|
||||
bh.Height,
|
||||
bh.Miner.String(),
|
||||
bh.Timestamp,
|
||||
bh.Ticket.VRFProof,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
stmt2, err := tx.Prepare(`insert into block_parents (block, parent) values (?, ?) on conflict do nothing`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer stmt2.Close() // nolint:errcheck
|
||||
for _, bh := range bhs {
|
||||
for _, parent := range bh.Parents {
|
||||
if _, err := stmt2.Exec(bh.Cid().String(), parent.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sync {
|
||||
stmt, err := tx.Prepare(`insert into blocks_synced (cid, add_ts) values (?, ?) on conflict do nothing`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer stmt.Close() // nolint:errcheck
|
||||
now := time.Now().Unix()
|
||||
|
||||
for _, bh := range bhs {
|
||||
if _, err := stmt.Exec(bh.Cid().String(), now); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (st *cwStorage) close() error {
|
||||
return st.db.Close()
|
||||
}
|
@ -12,8 +12,8 @@ import (
"strings"

"github.com/dgraph-io/badger/v2/y"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-base32"
"github.com/urfave/cli/v2"
|
@ -15,11 +15,11 @@ import (
"github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/pb"
"github.com/dustin/go-humanize"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/ipfs/go-merkledag"
"github.com/ipld/go-car"
"github.com/multiformats/go-base32"
|
257
cmd/lotus-shed/fevmanalytics.go
Normal file
@ -0,0 +1,257 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/urfave/cli/v2"
	"golang.org/x/net/context"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	evm2 "github.com/filecoin-project/lotus/chain/actors/builtin/evm"
	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/repo"
)

var FevmAnalyticsCmd = &cli.Command{
	Name:  "evm-analytics",
	Usage: "Get FEVM related metrics",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "repo",
			Value: "~/.lotus",
		},
	},
	Subcommands: []*cli.Command{
		FevmBalanceCmd,
		FevmActorsCmd,
	},
}

var FevmBalanceCmd = &cli.Command{
	Name:      "evm-balance",
	Usage:     "Balances in eth accounts, evm contracts and placeholders",
	ArgsUsage: "[state root]",

	Action: func(cctx *cli.Context) error {
		if cctx.NArg() != 1 {
			return xerrors.New("only needs state root")
		}

		if !cctx.Args().Present() {
			return fmt.Errorf("must pass state root")
		}

		sroot, err := cid.Decode(cctx.Args().First())
		if err != nil {
			return fmt.Errorf("failed to parse input: %w", err)
		}

		fsrepo, err := repo.NewFS(cctx.String("repo"))
		if err != nil {
			return err
		}

		lkrepo, err := fsrepo.Lock(repo.FullNode)
		if err != nil {
			return err
		}

		defer lkrepo.Close() //nolint:errcheck

		path, err := lkrepo.SplitstorePath()
		if err != nil {
			return err
		}

		path = filepath.Join(path, "hot.badger")
		if err := os.MkdirAll(path, 0755); err != nil {
			return err
		}

		opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, lkrepo.Readonly())
		if err != nil {
			return err
		}

		bs, err := badgerbs.Open(opts)
		if err != nil {
			return err
		}

		cst := cbor.NewCborStore(bs)
		st, err := state.LoadStateTree(cst, sroot)
		if err != nil {
			return err
		}

		fmt.Println("iterating over all actors")
		count := 0
		balanceEvm := abi.NewTokenAmount(0)
		balanceEthAccount := abi.NewTokenAmount(0)
		balancePlaceholder := abi.NewTokenAmount(0)

		err = st.ForEach(func(addr address.Address, act *types.Actor) error {
if count%200000 == 0 {
|
||||
fmt.Println("processed /n", count)
|
||||
}
|
||||
count++
|
||||
|
||||
if builtin.IsEvmActor(act.Code) {
|
||||
balanceEvm = types.BigAdd(balanceEvm, act.Balance)
|
||||
}
|
||||
|
||||
if builtin.IsEthAccountActor(act.Code) {
|
||||
balanceEthAccount = types.BigAdd(balanceEthAccount, act.Balance)
|
||||
}
|
||||
|
||||
if builtin.IsPlaceholderActor(act.Code) {
|
||||
balancePlaceholder = types.BigAdd(balancePlaceholder, act.Balance)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("balances in Eth contracts: ", balanceEvm)
|
||||
fmt.Println("balances in Eth accounts: ", balanceEthAccount)
|
||||
fmt.Println("balances in placeholder: ", balancePlaceholder)
|
||||
fmt.Println("Total balances: ", big.Add(big.Add(balanceEthAccount, balancePlaceholder), balanceEvm))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
var FevmActorsCmd = &cli.Command{
	Name:      "evm-actors",
	Usage:     "Actor counts for eth accounts, evm contracts and placeholders",
	ArgsUsage: "[state root]",

	Action: func(cctx *cli.Context) error {
		if cctx.NArg() != 1 {
			return xerrors.New("only needs state root")
		}

		if !cctx.Args().Present() {
			return fmt.Errorf("must pass state root")
		}

		sroot, err := cid.Decode(cctx.Args().First())
		if err != nil {
			return fmt.Errorf("failed to parse input: %w", err)
		}

		fsrepo, err := repo.NewFS(cctx.String("repo"))
		if err != nil {
			return err
		}

		lkrepo, err := fsrepo.Lock(repo.FullNode)
		if err != nil {
			return err
		}

		defer lkrepo.Close() //nolint:errcheck

		path, err := lkrepo.SplitstorePath()
		if err != nil {
			return err
		}

		path = filepath.Join(path, "hot.badger")
		if err := os.MkdirAll(path, 0755); err != nil {
			return err
		}

		opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, lkrepo.Readonly())
		if err != nil {
			return err
		}

		bs, err := badgerbs.Open(opts)
		if err != nil {
			return err
		}

		ctx := context.TODO()
		cst := cbor.NewCborStore(bs)
		store := adt.WrapStore(ctx, cst)

		st, err := state.LoadStateTree(cst, sroot)
		if err != nil {
			return err
		}

		fmt.Println("iterating over all actors")
		count := 0
		EvmCount := 0
		EthAccountCount := 0
		PlaceholderCount := 0
		ea := []cid.Cid{}

		err = st.ForEach(func(addr address.Address, act *types.Actor) error {
			if count%200000 == 0 {
				fmt.Println("processed", count)
			}
			count++

			if builtin.IsEvmActor(act.Code) {
				EvmCount++
				e, err := evm2.Load(store, act)
				if err != nil {
					return xerrors.Errorf("failed to load evm actor: %w", err)
				}
				bcid, err := e.GetBytecodeCID()
				if err != nil {
					return err
				}

				ea = append(ea, bcid)
			}

			if builtin.IsEthAccountActor(act.Code) {
				EthAccountCount++
			}

			if builtin.IsPlaceholderActor(act.Code) {
				PlaceholderCount++
			}

			return nil
		})
		if err != nil {
			return err
		}

		uniquesa := unique(ea)
		fmt.Println("# of EVM contracts: ", EvmCount)
		fmt.Println("# of unique EVM contracts: ", len(uniquesa))
		fmt.Println("# of Eth accounts: ", EthAccountCount)
		fmt.Println("# of placeholders: ", PlaceholderCount)
		return nil
	},
}

func unique(cids []cid.Cid) []cid.Cid {
	keys := make(map[cid.Cid]bool)
	list := []cid.Cid{}
	for _, entry := range cids {
		if _, value := keys[entry]; !value {
			keys[entry] = true
			list = append(list, entry)
		}
	}
	return list
}
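Once registered in lotus-shed (see the main.go hunk below), these run offline against a locked repo, e.g. "lotus-shed evm-analytics evm-actors <state root>" with the repo flag defaulting to ~/.lotus. The unique helper above is a standard map-based dedup; a self-contained sketch of the same idiom over strings, so it runs without the go-cid dependency:

package main

import "fmt"

// uniqueStrings mirrors unique() above: a map tracks seen values while
// first-occurrence order is preserved. Strings stand in for cid.Cid here.
func uniqueStrings(in []string) []string {
	seen := make(map[string]bool)
	out := []string{}
	for _, v := range in {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	return out
}

func main() {
	fmt.Println(uniqueStrings([]string{"a", "b", "a", "c", "b"})) // [a b c]
}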
@ -20,6 +20,7 @@ import (
	"github.com/filecoin-project/lotus/chain/beacon/drand"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/index"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
|
||||
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
|
||||
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -212,7 +213,7 @@ var replayOfflineCmd = &cli.Command{
|
||||
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
|
||||
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -7,8 +7,8 @@ import (
	"io"
	"os"

	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	block "github.com/ipfs/go-libipfs/blocks"
	"github.com/ipld/go-car"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

@ -21,6 +21,7 @@ import (
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/index"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
@ -90,7 +91,7 @@ var invariantsCmd = &cli.Command{
		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
		defer cs.Close() //nolint:errcheck

		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
		if err != nil {
			return err
		}

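Each of these call sites gains a trailing message-index argument, and the offline lotus-shed tools pass index.DummyMsgIndex, a no-op implementation, instead of a live index. A minimal sketch of that null-object pattern with hypothetical names (this is not the lotus index API):

package main

import "fmt"

// MsgLookup stands in for the message-index dependency; all names here are
// hypothetical and illustrate only the null-object pattern.
type MsgLookup interface {
	Find(cid string) (height int64, ok bool)
}

// dummyLookup satisfies the interface but never resolves anything, so
// callers that have no use for an index can still construct the manager.
type dummyLookup struct{}

func (dummyLookup) Find(string) (int64, bool) { return 0, false }

type Manager struct{ idx MsgLookup }

func NewManager(idx MsgLookup) *Manager { return &Manager{idx: idx} }

func main() {
	m := NewManager(dummyLookup{})
	_, ok := m.idx.Find("bafy...")
	fmt.Println("resolved:", ok) // false: the dummy never resolves
}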
@ -26,6 +26,7 @@ func main() {
		base32Cmd,
		base16Cmd,
		bitFieldCmd,
		chainwatchCmd,
		cronWcCmd,
		frozenMinersCmd,
		dealLabelCmd,
@ -83,6 +84,9 @@ func main() {
		invariantsCmd,
		gasTraceCmd,
		replayOfflineCmd,
		msgindexCmd,
		FevmAnalyticsCmd,
		mismatchesCmd,
	}

	app := &cli.App{
Some files were not shown because too many files have changed in this diff