Merge branch 'release/v1.25.0' into jen/v125cut
commit 4a165b3551
@@ -997,6 +997,7 @@ workflows:
       suite: utest-unit-cli
       target: "./cli/... ./cmd/... ./api/..."
       get-params: true
+      executor: golang-2xl
   - test:
       name: test-unit-node
       requires:
@@ -1004,6 +1005,7 @@ workflows:
       suite: utest-unit-node
       target: "./node/..."
+
 
   - test:
       name: test-unit-rest
       requires:
@@ -1018,6 +1020,7 @@ workflows:
       suite: utest-unit-storage
       target: "./storage/... ./extern/..."
+
 
   - test:
       go-test-flags: "-run=TestMulticoreSDR"
       requires:
@@ -558,6 +558,7 @@ workflows:
       suite: utest-[[ $suite ]]
       target: "[[ $pkgs ]]"
       [[if eq $suite "unit-cli"]]get-params: true[[end]]
+      [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
       [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
   [[- end]]
   - test:
31 .github/ISSUE_TEMPLATE/task.md vendored Normal file
@@ -0,0 +1,31 @@
+---
+name: New Task
+about: A larger yet well-scoped task
+title: '<title>'
+labels: Needs Triage
+assignees: ''
+
+---
+
+## User Story
+<!-- Why? -->
+
+## Acceptance Criteria
+<!-- What? -->
+<!-- add description-->
+
+```[tasklist]
+### Deliverables
+
+```
+
+## Technical Breakdown
+```[tasklist]
+### Development
+
+```
+
+```[tasklist]
+### Testing
+
+```
1 .github/pull_request_template.md vendored
@@ -16,6 +16,7 @@ Before you mark the PR ready for review, please make sure that:
   - example: ` fix: mempool: Introduce a cache for valid signatures`
   - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
   - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
+- [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section.
 - [ ] New features have usage guidelines and / or documentation updates in
   - [ ] [Lotus Documentation](https://lotus.filecoin.io)
   - [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
124 CHANGELOG.md
@@ -1,5 +1,128 @@
 # Lotus changelog
 
+# v1.25.0 / 2023-11-22
+
+This is a highly recommended feature release of Lotus. This optional release supports the Filecoin network version 21 upgrade, codenamed Watermelon 🍉, in addition to numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.
+
+**The Filecoin network upgrade v21, codenamed Watermelon 🍉, is at epoch 3469380 - 2023-12-12T13:30:00Z**
+
+The full list of [protocol improvements delivered in the network upgrade can be found here](https://github.com/filecoin-project/core-devs/blob/master/Network%20Upgrades/v21.md).
+
+## ☢️ Upgrade Warnings ☢️
+
+- Read through the [changelog of the mandatory v1.24.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.24.0), especially the `Migration` and `v12 Builtin Actor Bundle` sections.
+- Please remove your existing Lotus repo and clone a fresh one (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release.
+- This feature release requires Go v1.20.7 or higher to build Lotus. Go 1.21.x is not supported yet.
+- EthRPC providers, please check out the [new tracing API added to the Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100).
+
+## ⭐️ Highlights ⭐️
+
+**Unsealing bugfixes and enhancements**
+
+This feature release introduces significant improvements and bugfixes around unsealing, and ensures that unsealing operates as one would expect. Unsealing of all sector types (deal sectors, snap-sectors without sector keys, and snap-sectors with sector keys) now functions seamlessly.
+
+Some additional unsealing improvements are:
+- Unsealing on workers with only sealing paths works. :tada:
+- Unsealed files are transferred to long-term storage upon successful unsealing. :arrow_right:
+- No residual files are left in sealing paths after a successful unsealing operation. :broom:
+
+**SupraSeal C2**
+
+Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimizations in your sealing pipeline. The optimizations are currently behind the `FFI_USE_CUDA_SUPRASEAL` feature flag. We advise users to test this feature on a test network before using it on mainnet. To try it, export the `FFI_USE_CUDA_SUPRASEAL=1` environment variable and build your lotus-workers from source. For questions about the SupraSeal C2 sealing optimizations, reach out in the #fil-proofs or #dsa-sealing Slack channels.
+
+## New features
+- feat: add Eip155ChainID to StateGetNetworkParams ([filecoin-project/lotus#10987](https://github.com/filecoin-project/lotus/pull/10987))
+- feat: profiling: state summary and visualization ([filecoin-project/lotus#11012](https://github.com/filecoin-project/lotus/pull/11012))
+- feat: snapshot: remove existing chain ([filecoin-project/lotus#11032](https://github.com/filecoin-project/lotus/pull/11032))
+- feat: Add a metric to display pruning of the node's peers ([filecoin-project/lotus#11058](https://github.com/filecoin-project/lotus/pull/11058))
+- feat: shed: gather partition metadata ([filecoin-project/lotus#11078](https://github.com/filecoin-project/lotus/pull/11078))
+- feat: vm: allow raw "cbor" in state and use the new go-multicodec ([filecoin-project/lotus#11081](https://github.com/filecoin-project/lotus/pull/11081))
+- feat: Add new lotus-shed command for backfilling actor events ([filecoin-project/lotus#11088](https://github.com/filecoin-project/lotus/pull/11088))
+- feat: Add new tracing API ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100))
+- feat: FVM: do not error on unsuccessful implicit messages ([filecoin-project/lotus#11120](https://github.com/filecoin-project/lotus/pull/11120))
+- feat: chain node: Move consensus slasher to internal service ([filecoin-project/lotus#11126](https://github.com/filecoin-project/lotus/pull/11126))
+- feat: miner: implement FRC-0051 ([filecoin-project/lotus#11157](https://github.com/filecoin-project/lotus/pull/11157))
+- feat: chainstore: FRC-0051: Remove all equivocated blocks from tipsets ([filecoin-project/lotus#11104](https://github.com/filecoin-project/lotus/pull/11104))
+- feat: miner: 2 minor refactors ([filecoin-project/lotus#11158](https://github.com/filecoin-project/lotus/pull/11158))
+- feat: refactor: return randomness base to FVM without hashing ([filecoin-project/lotus#11167](https://github.com/filecoin-project/lotus/pull/11167))
+- feat: Lotus Gateway: add allocation and claim related GET APIs to gateway ([filecoin-project/lotus#11183](https://github.com/filecoin-project/lotus/pull/11183))
+- feat: shed: Add exec traces to `lotus-shed msg` ([filecoin-project/lotus#11188](https://github.com/filecoin-project/lotus/pull/11188))
+- feat: miner: defensive check for equivocation ([filecoin-project/lotus#11328](https://github.com/filecoin-project/lotus/pull/11328))
+
+## Improvements
+- feat: daemon: improvements to the consensus slasher ([filecoin-project/lotus#10979](https://github.com/filecoin-project/lotus/pull/10979))
+- fix: Snapdeals unsealing fixes ([filecoin-project/lotus#11011](https://github.com/filecoin-project/lotus/pull/11011))
+- refactor: Make all validation error actions explicit ([filecoin-project/lotus#11016](https://github.com/filecoin-project/lotus/pull/11016))
+- feat: shed: command for decoding block headers ([filecoin-project/lotus#11031](https://github.com/filecoin-project/lotus/pull/11031))
+- fix: stores: Tune down `StorageDeclareSector` log-lvl ([filecoin-project/lotus#11045](https://github.com/filecoin-project/lotus/pull/11045))
+- feat: types: apply a max length when decoding events ([filecoin-project/lotus#11054](https://github.com/filecoin-project/lotus/pull/11054))
+- feat: slasher: improve UX ([filecoin-project/lotus#11060](https://github.com/filecoin-project/lotus/pull/11060))
+- feat: daemon: improvements to the consensus slasher ([filecoin-project/lotus#11063](https://github.com/filecoin-project/lotus/pull/11063))
+- fix: events: Improve performance of event migration from V1 to V2 ([filecoin-project/lotus#11064](https://github.com/filecoin-project/lotus/pull/11064))
+- feat: lotus-bench: AMT benchmarking ([filecoin-project/lotus#11075](https://github.com/filecoin-project/lotus/pull/11075))
+- fix: DecodeRLP can panic ([filecoin-project/lotus#11079](https://github.com/filecoin-project/lotus/pull/11079))
+- fix: daemon: set real beacon schedule when importing chain ([filecoin-project/lotus#11080](https://github.com/filecoin-project/lotus/pull/11080))
+- fix: ethtypes: handle length overflow case ([filecoin-project/lotus#11082](https://github.com/filecoin-project/lotus/pull/11082))
+- chore: stmgr: migrations: do not log noisily on cache misses ([filecoin-project/lotus#11083](https://github.com/filecoin-project/lotus/pull/11083))
+- feat: daemon: import: only setup stmgr if validating chain ([filecoin-project/lotus#11084](https://github.com/filecoin-project/lotus/pull/11084))
+- fix: sealing pipeline: Fix PC1 retry loop ([filecoin-project/lotus#11087](https://github.com/filecoin-project/lotus/pull/11087))
+- chore: legacy syscalls: Cleanup ComputeUnsealedSectorCID ([filecoin-project/lotus#11119](https://github.com/filecoin-project/lotus/pull/11119))
+- sector import: fix evaluating randomness when importing a sector ([filecoin-project/lotus#11123](https://github.com/filecoin-project/lotus/pull/11123))
+- fix: cli: Only display `warning` if behind sync ([filecoin-project/lotus#11140](https://github.com/filecoin-project/lotus/pull/11140))
+- fix: worker: Support IPv6 formatted API-keys ([filecoin-project/lotus#11141](https://github.com/filecoin-project/lotus/pull/11141))
+- fix: sealing: Switch to calling PreCommitSectorBatch2 ([filecoin-project/lotus#11142](https://github.com/filecoin-project/lotus/pull/11142))
+- fix: downgrade harmless warning to debug ([filecoin-project/lotus#11145](https://github.com/filecoin-project/lotus/pull/11145))
+- fix: sealing: Fix RetryCommitWait loop when sector cron activation fails ([filecoin-project/lotus#11046](https://github.com/filecoin-project/lotus/pull/11046))
+- fix: gateway: return an error when an Eth filter is not found ([filecoin-project/lotus#11152](https://github.com/filecoin-project/lotus/pull/11152))
+- fix: chainstore: do not get stuck in unhappy equivocation cases ([filecoin-project/lotus#11159](https://github.com/filecoin-project/lotus/pull/11159))
+- fix: sealing: Run unsealing in the background for better UX ([filecoin-project/lotus#11177](https://github.com/filecoin-project/lotus/pull/11177))
+- fix: build: Allow lotus-wallet to be built independently ([filecoin-project/lotus#11187](https://github.com/filecoin-project/lotus/pull/11187))
+- fix: wallet: Make import handle SIGINT/SIGTERM ([filecoin-project/lotus#11190](https://github.com/filecoin-project/lotus/pull/11190))
+- fix: markets/dagstore: remove trace goroutine for dagstore wrapper ([filecoin-project/lotus#11191](https://github.com/filecoin-project/lotus/pull/11191))
+- fix: chain: Do not update message info cache until after message validation ([filecoin-project/lotus#11202](https://github.com/filecoin-project/lotus/pull/11202))
+- fix: chain: cancel long operations upon ctx cancelation ([filecoin-project/lotus#11206](https://github.com/filecoin-project/lotus/pull/11206))
+- fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214))
+- fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224))
+- chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350))
+
+## Dependencies
+- deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998))
+- deps: update go-libp2p to v0.29.2 ([filecoin-project/lotus#11164](https://github.com/filecoin-project/lotus/pull/11164))
+- deps: update go-libp2p to v0.30.0 ([filecoin-project/lotus#11189](https://github.com/filecoin-project/lotus/pull/11189))
+- fix: build: use tagged releases ([filecoin-project/lotus#11194](https://github.com/filecoin-project/lotus/pull/11194))
+- chore: test-vectors: update ([filecoin-project/lotus#11196](https://github.com/filecoin-project/lotus/pull/11196))
+- chore: backport #11365 to release/v1.25.0 ([filecoin-project/lotus#11369](https://github.com/filecoin-project/lotus/pull/11369))
+- chore: deps: update to go-state-types v0.12.8 ([filecoin-project/lotus#11339](https://github.com/filecoin-project/lotus/pull/11437))
+- chore: deps: update to final actors ([filecoin-project/lotus#11330](https://github.com/filecoin-project/lotus/pull/11440))
+- github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0)
+- github.com/filecoin-project/test-vectors/schema (v0.0.5 -> v0.0.7)
+
+## Others
+- chore: Extract stable release and post release portion outside of RC testing in template ([filecoin-project/lotus#11000](https://github.com/filecoin-project/lotus/pull/11000))
+- fix: docs: include FFI steps in lotus RELEASE_ISSUE_TEMPLATE ([filecoin-project/lotus#11047](https://github.com/filecoin-project/lotus/pull/11047))
+- chore: build: update to v1.23.4-dev ([filecoin-project/lotus#11049](https://github.com/filecoin-project/lotus/pull/11049))
+- fix: deflake: Use MockProofs ([filecoin-project/lotus#11059](https://github.com/filecoin-project/lotus/pull/11059))
+- fix: failing test: Tweak TestWindowPostV1P1NV20 test condition ([filecoin-project/lotus#11121](https://github.com/filecoin-project/lotus/pull/11121))
+- fix: CI: make test-unit-rest actually be the rest of the tests ([filecoin-project/lotus#11147](https://github.com/filecoin-project/lotus/pull/11147))
+- chore: merge releases into master ([filecoin-project/lotus#11154](https://github.com/filecoin-project/lotus/pull/11154))
+- tests: deflake: TestGetBlockByNumber ([filecoin-project/lotus#11155](https://github.com/filecoin-project/lotus/pull/11155))
+- tests: mac seal test ([filecoin-project/lotus#11180](https://github.com/filecoin-project/lotus/pull/11180))
+- tests: Take Download out of Sealer time ([filecoin-project/lotus#11182](https://github.com/filecoin-project/lotus/pull/11182))
+- feat: test: Test that verified clients can directly transfer datacap, creating allocations ([filecoin-project/lotus#11169](https://github.com/filecoin-project/lotus/pull/11169))
+- chore: merge feat/nv21 into master ([filecoin-project/lotus#11201](https://github.com/filecoin-project/lotus/pull/11201))
+- ci: Use larger executor for cli tests ([filecoin-project/lotus#11212](https://github.com/filecoin-project/lotus/pull/11212))
+- fix: dockerfile: Bump to Go 1.20.7 image ([filecoin-project/lotus#11221](https://github.com/filecoin-project/lotus/pull/11221))
+- docs: Update PR template to callout remembering to update CHANGELOG ([filecoin-project/lotus#11232](https://github.com/filecoin-project/lotus/pull/11232))
+- chore: release: 1.23.4rc1 prep ([filecoin-project/lotus#11248](https://github.com/filecoin-project/lotus/pull/11248))
+- chore: backport #11262 ([filecoin-project/lotus#11265](https://github.com/filecoin-project/lotus/pull/11265))
+- chore: backport #11294 into `release/v1.23.4` ([filecoin-project/lotus#11295](https://github.com/filecoin-project/lotus/pull/11295))
+- chore: release: V1.25 rebase ([filecoin-project/lotus#11342](https://github.com/filecoin-project/lotus/pull/11342))
+- backport: tests: add SynthPorep layers to cachefiles ([filecoin-project/lotus#11344](https://github.com/filecoin-project/lotus/pull/11344))
+- chore: backport #11408 to release/v1.25.0 ([filecoin-project/lotus#11414](https://github.com/filecoin-project/lotus/pull/11414))
+- chore: backport calibnet lightweight patch ([filecoin-project/lotus#11422](https://github.com/filecoin-project/lotus/pull/11422))
+- chore: update bootstrap nodes ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288))
+- chore: add bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175))
+
 # 1.24.0 / 2023-11-22
 
 This is the stable release for the upcoming **MANDATORY** Filecoin network upgrade v21, codenamed Watermelon 🍉, at **epoch 3469380 - 2023-12-12T13:30:00Z**.
@@ -96,6 +219,7 @@ There is a new protocol limit on how many partition could be submited in one PoS
 The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at Chainsafe has launched a brand new lightweight snapshot service that is backed by forest nodes! This is a great alternative service alongside the fil-infra one, and it is compatible with lotus! We recommend lotus users check it out [here](https://docs.filecoin.io/networks/mainnet#resources)!
 
+
 
 # v1.23.3 / 2023-08-01
 
 This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
2 Makefile
@@ -193,7 +193,7 @@ lotus-health:
 .PHONY: lotus-health
 BINS+=lotus-health
 
-lotus-wallet:
+lotus-wallet: $(BUILD_DEPS)
 	rm -f lotus-wallet
 	$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
 .PHONY: lotus-wallet
@@ -867,6 +867,13 @@ type FullNode interface {
 	// Returns the client version
 	Web3ClientVersion(ctx context.Context) (string, error) //perm:read
+
+	// TraceAPI related methods
+	//
+	// Returns traces created at given block
+	EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
+	// Replays all transactions in a block returning the requested traces for each transaction
+	EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read
 
 	// CreateBackup creates node backup onder the specified file name. The
 	// method requires that the lotus daemon is running with the
 	// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
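For RPC consumers, here is a minimal Go sketch of calling the new tracing methods. It is not part of this diff: it assumes a running lotus node (the endpoint URL and nil auth header are placeholders) and uses the `NewFullNodeRPCV1` helper from lotus's `api/client` package.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; point this at your node's API address.
	node, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", nil)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Fetch the traces created at a given block ("latest" here for illustration).
	traces, err := node.EthTraceBlock(ctx, "latest")
	if err != nil {
		panic(err)
	}
	for _, tr := range traces {
		fmt.Printf("trace: %+v\n", tr)
	}
}
```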
@@ -9,6 +9,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-jsonrpc"
 	"github.com/filecoin-project/go-state-types/abi"
+	verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	"github.com/filecoin-project/go-state-types/dline"
 
 	apitypes "github.com/filecoin-project/lotus/api/types"
@@ -65,6 +66,11 @@ type Gateway interface {
 	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
 	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
 	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+	StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+	StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+	StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+	StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+	StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
 	StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
 	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
 	StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
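A hedged sketch of exercising one of the new allocation/claim GET APIs through the gateway. It assumes a reachable gateway endpoint (placeholder URL), a provider address chosen purely for illustration, and the `NewGatewayRPCV1` client helper.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	ctx := context.Background()

	// Placeholder gateway endpoint.
	gw, closer, err := client.NewGatewayRPCV1(ctx, "ws://127.0.0.1:2346/rpc/v1", nil)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Hypothetical provider ID address, for illustration only.
	provider, err := address.NewFromString("f01234")
	if err != nil {
		panic(err)
	}

	// List the verified-registry claims held by this provider at chain head.
	claims, err := gw.StateGetClaims(ctx, provider, types.EmptyTSK)
	if err != nil {
		panic(err)
	}
	for id, c := range claims {
		fmt.Printf("claim %d: %+v\n", id, c)
	}
}
```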
@@ -121,4 +127,6 @@ type Gateway interface {
 	EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
 	EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
 	Web3ClientVersion(ctx context.Context) (string, error)
+	EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+	EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
 }
@@ -40,6 +40,9 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
 	as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
 	as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")
 
+	as.AliasMethod("trace_block", "Filecoin.EthTraceBlock")
+	as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions")
+
 	as.AliasMethod("net_version", "Filecoin.NetVersion")
 	as.AliasMethod("net_listening", "Filecoin.NetListening")
 
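Because of these aliases, Ethereum tooling can call `trace_block` directly. A hedged, self-contained sketch of a raw JSON-RPC request; the endpoint URL and the `"latest"` block parameter are assumptions about a typical node setup.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "trace_block" is aliased to Filecoin.EthTraceBlock by CreateEthRPCAliases.
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"trace_block","params":["latest"]}`)

	// Placeholder endpoint; point this at your node's HTTP RPC address.
	resp, err := http.Post("http://127.0.0.1:1234/rpc/v1", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```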
@@ -1491,6 +1491,36 @@ func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
 }
 
+// EthTraceBlock mocks base method.
+func (m *MockFullNode) EthTraceBlock(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceBlock, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EthTraceBlock", arg0, arg1)
+	ret0, _ := ret[0].([]*ethtypes.EthTraceBlock)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EthTraceBlock indicates an expected call of EthTraceBlock.
+func (mr *MockFullNodeMockRecorder) EthTraceBlock(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceBlock", reflect.TypeOf((*MockFullNode)(nil).EthTraceBlock), arg0, arg1)
+}
+
+// EthTraceReplayBlockTransactions mocks base method.
+func (m *MockFullNode) EthTraceReplayBlockTransactions(arg0 context.Context, arg1 string, arg2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EthTraceReplayBlockTransactions", arg0, arg1, arg2)
+	ret0, _ := ret[0].([]*ethtypes.EthTraceReplayBlockTransaction)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EthTraceReplayBlockTransactions indicates an expected call of EthTraceReplayBlockTransactions.
+func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2)
+}
+
 // EthUninstallFilter mocks base method.
 func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
 	m.ctrl.T.Helper()
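These generated mocks let tests set expectations on the new methods. A hedged sketch of how a test might use them, assuming the mocks live in lotus's `api/mocks` package and the `gomock` library already used by the project:

```go
package example

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/filecoin-project/lotus/api/mocks"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

func TestTraceBlockMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := mocks.NewMockFullNode(ctrl)
	// Expect exactly one EthTraceBlock call and return an empty trace list.
	node.EXPECT().
		EthTraceBlock(gomock.Any(), "latest").
		Return([]*ethtypes.EthTraceBlock{}, nil)

	traces, err := node.EthTraceBlock(context.Background(), "latest")
	if err != nil || len(traces) != 0 {
		t.Fatalf("unexpected result: %v %v", traces, err)
	}
}
```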
117 api/proxy_gen.go
@@ -315,6 +315,10 @@ type FullNodeMethods struct {
 	EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
 
+	EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) `perm:"read"`
+
+	EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `perm:"read"`
+
 	EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
 
 	EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
@@ -731,6 +735,10 @@ type GatewayMethods struct {
 	EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
 
+	EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) ``
+
+	EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) ``
+
 	EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
 
 	EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
@@ -769,6 +777,16 @@ type GatewayMethods struct {
 	StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
 
+	StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+	StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+	StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+	StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+	StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
 	StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
 
 	StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
@@ -2446,6 +2464,28 @@ func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult
 	return *new(ethtypes.EthSyncingResult), ErrNotSupported
 }
 
+func (s *FullNodeStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+	if s.Internal.EthTraceBlock == nil {
+		return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+	}
+	return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *FullNodeStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+	return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *FullNodeStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+	if s.Internal.EthTraceReplayBlockTransactions == nil {
+		return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+	}
+	return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *FullNodeStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+	return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
 func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
 	if s.Internal.EthUninstallFilter == nil {
 		return false, ErrNotSupported
@@ -4668,6 +4708,28 @@ func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult,
 	return *new(ethtypes.EthSyncingResult), ErrNotSupported
 }
 
+func (s *GatewayStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+	if s.Internal.EthTraceBlock == nil {
+		return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+	}
+	return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *GatewayStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+	return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *GatewayStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+	if s.Internal.EthTraceReplayBlockTransactions == nil {
+		return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+	}
+	return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *GatewayStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+	return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
 func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
 	if s.Internal.EthUninstallFilter == nil {
 		return false, ErrNotSupported
@@ -4877,6 +4939,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
 	return nil, ErrNotSupported
 }
 
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocation == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocationForPendingDeal == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocations == nil {
+		return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+	}
+	return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+	return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+	if s.Internal.StateGetClaim == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+	if s.Internal.StateGetClaims == nil {
+		return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+	}
+	return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+	return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
 	if s.Internal.StateListMiners == nil {
 		return *new([]address.Address), ErrNotSupported
@@ -312,6 +312,7 @@ type NetworkParams struct {
 	SupportedProofTypes     []abi.RegisteredSealProof
 	PreCommitChallengeDelay abi.ChainEpoch
 	ForkUpgradeParams       ForkUpgradeParams
+	Eip155ChainID           int
 }
 
 type ForkUpgradeParams struct {
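The new field surfaces through `StateGetNetworkParams`. A minimal sketch of reading it, under the same assumptions as before (running node, placeholder endpoint, `NewFullNodeRPCV1` helper):

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; adjust to your node's API address.
	node, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", nil)
	if err != nil {
		panic(err)
	}
	defer closer()

	params, err := node.StateGetNetworkParams(ctx)
	if err != nil {
		panic(err)
	}
	// The EIP-155 chain ID of the network this node is on (314 on mainnet,
	// per https://github.com/ethereum-lists/chains).
	fmt.Println("EIP-155 chain ID:", params.Eip155ChainID)
}
```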
@@ -8,6 +8,7 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+	verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	"github.com/filecoin-project/go-state-types/dline"
 	abinetwork "github.com/filecoin-project/go-state-types/network"
 
@@ -61,6 +62,11 @@ type Gateway interface {
 	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
 	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
 	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+	StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+	StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+	StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+	StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+	StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
 	StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
 	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
 	StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
@@ -478,6 +478,16 @@ type GatewayMethods struct {
 	StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
 
+	StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+	StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+	StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+	StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+	StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
 	StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
 
 	StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
@@ -2850,6 +2860,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
 	return nil, ErrNotSupported
 }
 
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocation == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocationForPendingDeal == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+	if s.Internal.StateGetAllocations == nil {
+		return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+	}
+	return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+	return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+	if s.Internal.StateGetClaim == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+	if s.Internal.StateGetClaims == nil {
+		return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+	}
+	return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+	return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
 	if s.Internal.StateGetReceipt == nil {
 		return nil, ErrNotSupported
@@ -138,6 +138,8 @@ const BlockDelaySecs = uint64(4)
 
 const PropagationDelaySecs = uint64(1)
 
+var EquivocationDelaySecs = uint64(0)
+
 // SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
 // which the miner is slashed
 //
@@ -89,6 +89,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
 
 const PropagationDelaySecs = uint64(6)
 
+var EquivocationDelaySecs = uint64(2)
+
 // BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
 const BootstrapPeerThreshold = 2
 
@@ -129,6 +129,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
 
 var PropagationDelaySecs = uint64(10)
 
+var EquivocationDelaySecs = uint64(2)
+
 // BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
 const BootstrapPeerThreshold = 4
 
@@ -127,6 +127,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
 
 const PropagationDelaySecs = uint64(6)
 
+var EquivocationDelaySecs = uint64(2)
+
 // BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
 const BootstrapPeerThreshold = 2
 
@@ -112,6 +112,8 @@ var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
 var PreCommitChallengeDelay = abi.ChainEpoch(150)
 var PropagationDelaySecs = uint64(10)
 
+var EquivocationDelaySecs = uint64(2)
+
 func init() {
 	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
 		SetAddressNetwork(address.Mainnet)
@@ -9,7 +9,6 @@ package build
 
 import (
 	"math/big"
-	"time"
 
 	"github.com/ipfs/go-cid"
 
@@ -34,6 +33,7 @@ var (
 	MinimumBaseFee        = int64(100)
 	BlockDelaySecs        = uint64(builtin2.EpochDurationSeconds)
 	PropagationDelaySecs  = uint64(6)
+	EquivocationDelaySecs = uint64(2)
 	SupportedProofTypes = []abi.RegisteredSealProof{
 		abi.RegisteredSealProof_StackedDrg32GiBV1,
 		abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -141,7 +141,3 @@ const BootstrapPeerThreshold = 1
 // ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
 // As per https://github.com/ethereum-lists/chains
 const Eip155ChainId = 31415926
-
-// Reducing the delivery delay for equivocation of
-// consistent broadcast to just half a second.
-var CBDeliveryDelay = 500 * time.Millisecond
@@ -37,7 +37,7 @@ func BuildTypeString() string {
 }
 
 // BuildVersion is the local build version
-const BuildVersion = "1.24.0"
+const BuildVersion = "1.25.0"
 
 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -235,3 +235,16 @@ func (db *DrandBeacon) maxBeaconRoundV2(latestTs uint64) uint64 {
 }
 
 var _ beacon.RandomBeacon = (*DrandBeacon)(nil)
+
+func BeaconScheduleFromDrandSchedule(dcs dtypes.DrandSchedule, genesisTime uint64, ps *pubsub.PubSub) (beacon.Schedule, error) {
+	shd := beacon.Schedule{}
+	for _, dc := range dcs {
+		bc, err := NewDrandBeacon(genesisTime, build.BlockDelaySecs, ps, dc.Config)
+		if err != nil {
+			return nil, xerrors.Errorf("creating drand beacon: %w", err)
+		}
+		shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+	}
+
+	return shd, nil
+}
@@ -362,7 +362,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
 		rbase = entries[len(entries)-1]
 	}
 
-	eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc)
+	eproof, err := IsRoundWinner(ctx, round, m, rbase, mbi, mc)
 	if err != nil {
 		return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err)
 	}
@@ -449,18 +449,19 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
 }
 
 func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
+	ctx := context.TODO()
 	var blks []*types.FullBlock
 
 	for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
 		for mi, m := range miners {
-			bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
+			bvals, et, ticket, err := cg.nextBlockProof(ctx, base, m, round)
 			if err != nil {
 				return nil, xerrors.Errorf("next block proof: %w", err)
 			}
 
 			if et != nil {
 				// TODO: maybe think about passing in more real parameters to this?
-				wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
+				wpost, err := cg.eppProvs[m].ComputeProof(ctx, nil, nil, round, network.Version0)
 				if err != nil {
 					return nil, err
 				}
@@ -476,8 +477,18 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
 	}
 
 	fts := store.NewFullTipSet(blks)
-	if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
-		return nil, err
+	if err := cg.cs.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
+		return nil, xerrors.Errorf("failed to persist tipset: %w", err)
+	}
+
+	for _, blk := range blks {
+		if err := cg.cs.AddToTipSetTracker(ctx, blk.Header); err != nil {
+			return nil, xerrors.Errorf("failed to add to tipset tracker: %w", err)
+		}
+	}
+
+	if err := cg.cs.RefreshHeaviestTipSet(ctx, fts.TipSet().Height()); err != nil {
+		return nil, xerrors.Errorf("failed to put tipset: %w", err)
 	}
 
 	cg.CurTipset = fts
@@ -628,7 +639,7 @@ func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInf
 	return ValidWpostForTesting, nil
 }
 
-func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
+func IsRoundWinner(ctx context.Context, round abi.ChainEpoch,
 	miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) {
 
 	buf := new(bytes.Buffer)
@ -63,6 +63,9 @@ var MaxNonceGap = uint64(4)
|
|||||||
|
|
||||||
const MaxMessageSize = 64 << 10 // 64KiB
|
const MaxMessageSize = 64 << 10 // 64KiB
|
||||||
|
|
||||||
|
// NOTE: When adding a new error type, please make sure to add the new error type in
|
||||||
|
// func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message)
|
||||||
|
// in /chain/sub/incoming.go
|
||||||
var (
|
var (
|
||||||
ErrMessageTooBig = errors.New("message too big")
|
ErrMessageTooBig = errors.New("message too big")
|
||||||
|
|
||||||
|
@ -388,6 +388,14 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
|
|||||||
circ := big.Zero()
|
circ := big.Zero()
|
||||||
unCirc := big.Zero()
|
unCirc := big.Zero()
|
||||||
err := st.ForEach(func(a address.Address, actor *types.Actor) error {
|
err := st.ForEach(func(a address.Address, actor *types.Actor) error {
|
||||||
|
// this can be a lengthy operation, we need to cancel early when
|
||||||
|
// the context is cancelled to avoid resource exhaustion
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// this will cause ForEach to return
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
switch {
|
switch {
|
||||||
case actor.Balance.IsZero():
|
case actor.Balance.IsZero():
|
||||||
// Do nothing for zero-balance actors
|
// Do nothing for zero-balance actors
|
||||||
|
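The select/default idiom added above (and again in ReorgOps later in this diff) is the standard Go way to make a long, otherwise-uninterruptible loop responsive to cancellation. A minimal self-contained sketch of the pattern, with illustrative names of our choosing:

	package main

	import "context"

	// visitAll walks items, bailing out promptly once ctx is cancelled. The
	// default branch keeps the select non-blocking, so the check is nearly
	// free on every iteration until cancellation actually happens.
	func visitAll(ctx context.Context, items []int, visit func(int) error) error {
		for _, it := range items {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			if err := visit(it); err != nil {
				return err
			}
		}
		return nil
	}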
@ -70,7 +70,7 @@ func TestChainCheckpoint(t *testing.T) {
 	}
 
 	// See if the chain will take the fork, it shouldn't.
-	err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+	err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
 	require.NoError(t, err)
 	head = cs.GetHeaviestTipSet()
 	require.True(t, head.Equals(checkpoint))
@ -80,7 +80,7 @@ func TestChainCheckpoint(t *testing.T) {
 	require.NoError(t, err)
 
 	// Now switch to the other fork.
-	err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+	err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
 	require.NoError(t, err)
 	head = cs.GetHeaviestTipSet()
 	require.True(t, head.Equals(last))
@ -16,6 +16,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/gen"
 	"github.com/filecoin-project/lotus/chain/store"
+	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/mock"
 )
 
@ -47,28 +48,29 @@ func TestIndexSeeks(t *testing.T) {
 	}
 
 	cur := mock.TipSet(gen)
-	if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
-		t.Fatal(err)
-	}
 	assert.NoError(t, cs.SetGenesis(ctx, gen))
 
 	// Put 113 blocks from genesis
 	for i := 0; i < 113; i++ {
-		nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))
-		if err := cs.PutTipSet(ctx, nextts); err != nil {
-			t.Fatal(err)
-		}
+		nextBlk := mock.MkBlock(cur, 1, 1)
+		nextts := mock.TipSet(nextBlk)
+		assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{nextts}))
+		assert.NoError(t, cs.AddToTipSetTracker(ctx, nextBlk))
 		cur = nextts
 	}
 
+	assert.NoError(t, cs.RefreshHeaviestTipSet(ctx, cur.Height()))
+
 	// Put 50 null epochs + 1 block
 	skip := mock.MkBlock(cur, 1, 1)
 	skip.Height += 50
 
 	skipts := mock.TipSet(skip)
 
-	if err := cs.PutTipSet(ctx, skipts); err != nil {
+	assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{skipts}))
+	assert.NoError(t, cs.AddToTipSetTracker(ctx, skip))
+
+	if err := cs.RefreshHeaviestTipSet(ctx, skip.Height); err != nil {
 		t.Fatal(err)
 	}
 
@ -15,7 +15,7 @@ import (
 	"github.com/ipld/go-car"
 	carutil "github.com/ipld/go-car/util"
 	carv2 "github.com/ipld/go-car/v2"
-	mh "github.com/multiformats/go-multihash"
+	"github.com/multiformats/go-multicodec"
 	cbg "github.com/whyrusleeping/cbor-gen"
 	"go.uber.org/atomic"
 	"golang.org/x/sync/errgroup"
@ -369,14 +369,16 @@ func (s *walkScheduler) Wait() error {
 }
 
 func (s *walkScheduler) enqueueIfNew(task walkTask) {
-	if task.c.Prefix().MhType == mh.IDENTITY {
+	if multicodec.Code(task.c.Prefix().MhType) == multicodec.Identity {
 		//log.Infow("ignored", "cid", todo.c.String())
 		return
 	}
 
-	// This lets through RAW and CBOR blocks, the only two types that we
-	// end up writing to the exported CAR.
-	if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
+	// This lets through RAW, CBOR, and DagCBOR blocks, the only types that we end up writing to
+	// the exported CAR.
+	switch multicodec.Code(task.c.Prefix().Codec) {
+	case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
+	default:
 		//log.Infow("ignored", "cid", todo.c.String())
 		return
 	}
@ -450,7 +452,8 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
 	// We exported the ipld block. If it wasn't a CBOR block, there's nothing
 	// else to do and we can bail out early as it won't have any links
 	// etc.
-	if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
+	if multicodec.Code(t.c.Prefix().Codec) != multicodec.DagCbor ||
+		multicodec.Code(t.c.Prefix().MhType) == multicodec.Identity {
 		return nil
 	}
 
@ -683,14 +686,13 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
 		prefix := c.Prefix()
 
 		// Don't include identity CIDs.
-		if prefix.MhType == mh.IDENTITY {
+		if multicodec.Code(prefix.MhType) == multicodec.Identity {
 			continue
 		}
 
-		// We only include raw and dagcbor, for now.
-		// Raw for "code" CIDs.
-		switch prefix.Codec {
-		case cid.Raw, cid.DagCBOR:
+		// We only include raw, cbor, and dagcbor, for now.
+		switch multicodec.Code(prefix.Codec) {
+		case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
 		default:
 			continue
 		}
@ -722,7 +724,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
 }
 
 func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
-	if root.Prefix().Codec != cid.DagCBOR {
+	if multicodec.Code(root.Prefix().Codec) != multicodec.DagCbor {
 		return in, nil
 	}
 
@ -367,49 +367,32 @@ func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid)
 func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
 	ts, err := types.NewTipSet([]*types.BlockHeader{b})
 	if err != nil {
-		return err
+		return xerrors.Errorf("failed to construct genesis tipset: %w", err)
 	}
 
-	if err := cs.PutTipSet(ctx, ts); err != nil {
-		return err
+	if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
+		return xerrors.Errorf("failed to persist genesis tipset: %w", err)
+	}
+
+	if err := cs.AddToTipSetTracker(ctx, b); err != nil {
+		return xerrors.Errorf("failed to add genesis tipset to tracker: %w", err)
+	}
+
+	if err := cs.RefreshHeaviestTipSet(ctx, ts.Height()); err != nil {
+		return xerrors.Errorf("failed to put genesis tipset: %w", err)
 	}
 
 	return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
 }
 
-func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
-	if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
-		return xerrors.Errorf("failed to persist tipset: %w", err)
-	}
-
-	expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
-	if err != nil {
-		return xerrors.Errorf("errored while expanding tipset: %w", err)
-	}
-
-	if expanded.Key() != ts.Key() {
-		log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
-
-		tsBlk, err := expanded.Key().ToStorageBlock()
-		if err != nil {
-			return xerrors.Errorf("failed to get tipset key block: %w", err)
-		}
-
-		if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
-			return xerrors.Errorf("failed to put tipset key block: %w", err)
-		}
-	}
-
-	if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
-		return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
-	}
-	return nil
-}
-
-// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
-// internal state as our new head, if and only if it is heavier than the current
-// head and does not exceed the maximum fork length.
-func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
+// RefreshHeaviestTipSet receives a newTsHeight at which a new tipset might exist. It then:
+// - "refreshes" the heaviest tipset that can be formed at its current heaviest height
+// - if equivocation is detected among the miners of the current heaviest tipset, the head is immediately updated to the heaviest tipset that can be formed in a range of 5 epochs
+//
+// - forms the best tipset that can be formed at the _input_ height
+// - compares the three tipset weights: "current" heaviest tipset, "refreshed" tipset, and best tipset at newTsHeight
+// - updates "current" heaviest to the heaviest of those 3 tipsets (if an update is needed), assuming it doesn't violate the maximum fork rule
+func (cs *ChainStore) RefreshHeaviestTipSet(ctx context.Context, newTsHeight abi.ChainEpoch) error {
 	for {
 		cs.heaviestLk.Lock()
 		if len(cs.reorgCh) < reorgChBuf/2 {
|
|||||||
|
|
||||||
defer cs.heaviestLk.Unlock()
|
defer cs.heaviestLk.Unlock()
|
||||||
|
|
||||||
if ts.Equals(cs.heaviest) {
|
heaviestWeight, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to calculate currentHeaviest's weight: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
heaviestHeight := abi.ChainEpoch(0)
|
||||||
|
if cs.heaviest != nil {
|
||||||
|
heaviestHeight = cs.heaviest.Height()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Before we look at newTs, let's refresh best tipset at current head's height -- this is done to detect equivocation
|
||||||
|
newHeaviest, newHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, heaviestHeight)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to reform head at same height: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equivocation has occurred! We need a new head NOW!
|
||||||
|
if newHeaviest == nil || newHeaviestWeight.LessThan(heaviestWeight) {
|
||||||
|
log.Warnf("chainstore heaviest tipset's weight SHRANK from %d (%s) to %d (%s) due to equivocation", heaviestWeight, cs.heaviest, newHeaviestWeight, newHeaviest)
|
||||||
|
// Unfortunately, we don't know what the right height to form a new heaviest tipset is.
|
||||||
|
// It is _probably_, but not _necessarily_, heaviestHeight.
|
||||||
|
// So, we need to explore a range of epochs, finding the heaviest tipset in that range.
|
||||||
|
// We thus try to form the heaviest tipset for 5 epochs above heaviestHeight (most of which will likely not exist),
|
||||||
|
// as well as for 5 below.
|
||||||
|
// This is slow, but we expect to almost-never be here (only if miners are equivocating, which carries a hefty penalty).
|
||||||
|
for i := heaviestHeight + 5; i > heaviestHeight-5; i-- {
|
||||||
|
possibleHeaviestTs, possibleHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, i)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to produce head at height %d: %w", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if possibleHeaviestWeight.GreaterThan(newHeaviestWeight) {
|
||||||
|
newHeaviestWeight = possibleHeaviestWeight
|
||||||
|
newHeaviest = possibleHeaviestTs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we've found something, we know it's the heaviest equivocation-free head, take it IMMEDIATELY
|
||||||
|
if newHeaviest != nil {
|
||||||
|
errTake := cs.takeHeaviestTipSet(ctx, newHeaviest)
|
||||||
|
if errTake != nil {
|
||||||
|
return xerrors.Errorf("failed to take newHeaviest tipset as head: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// if we haven't found something, just stay with our equivocation-y head
|
||||||
|
newHeaviest = cs.heaviest
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the new height we were notified about isn't what we just refreshed at, see if we have a heavier tipset there
|
||||||
|
if newTsHeight != newHeaviest.Height() {
|
||||||
|
bestTs, bestTsWeight, err := cs.FormHeaviestTipSetForHeight(ctx, newTsHeight)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to form new heaviest tipset at height %d: %w", newTsHeight, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
heavier := bestTsWeight.GreaterThan(newHeaviestWeight)
|
||||||
|
if bestTsWeight.Equals(newHeaviestWeight) {
|
||||||
|
heavier = breakWeightTie(bestTs, newHeaviest)
|
||||||
|
}
|
||||||
|
|
||||||
|
if heavier {
|
||||||
|
newHeaviest = bestTs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Everything's the same as before, exit early
|
||||||
|
if newHeaviest.Equals(cs.heaviest) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
|
// At this point, it MUST be true that newHeaviest is heavier than cs.heaviest -- update if fork allows
|
||||||
|
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, newHeaviest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return xerrors.Errorf("failed to check fork length: %w", err)
|
||||||
}
|
}
|
||||||
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
|
|
||||||
|
if exceeds {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = cs.takeHeaviestTipSet(ctx, newHeaviest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return xerrors.Errorf("failed to take heaviest tipset: %w", err)
|
||||||
}
|
|
||||||
|
|
||||||
heavier := w.GreaterThan(heaviestW)
|
|
||||||
if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
|
|
||||||
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
|
|
||||||
heavier = breakWeightTie(ts, cs.heaviest)
|
|
||||||
}
|
|
||||||
|
|
||||||
if heavier {
|
|
||||||
// TODO: don't do this for initial sync. Now that we don't have a
|
|
||||||
// difference between 'bootstrap sync' and 'caught up' sync, we need
|
|
||||||
// some other heuristic.
|
|
||||||
|
|
||||||
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if exceeds {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return cs.takeHeaviestTipSet(ctx, ts)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -655,6 +689,16 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// write the tipsetkey block to the blockstore for EthAPI queries
|
||||||
|
tsBlk, err := ts.Key().ToStorageBlock()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get tipset key block: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
|
||||||
|
return xerrors.Errorf("failed to put tipset key block: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
if prevHeaviest != nil { // buf
|
if prevHeaviest != nil { // buf
|
||||||
if len(cs.reorgCh) > 0 {
|
if len(cs.reorgCh) > 0 {
|
||||||
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
|
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
|
||||||
@ -904,6 +948,14 @@ func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetK
|
|||||||
|
|
||||||
var leftChain, rightChain []*types.TipSet
|
var leftChain, rightChain []*types.TipSet
|
||||||
for !left.Equals(right) {
|
for !left.Equals(right) {
|
||||||
|
// this can take a long time and lot of memory if the tipsets are far apart
|
||||||
|
// since it can be reached through remote calls, we need to
|
||||||
|
// cancel early when possible to prevent resource exhaustion.
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, nil, ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
if left.Height() > right.Height() {
|
if left.Height() > right.Height() {
|
||||||
leftChain = append(leftChain, left)
|
leftChain = append(leftChain, left)
|
||||||
par, err := lts(ctx, left.Parents())
|
par, err := lts(ctx, left.Parents())
|
||||||
@ -960,7 +1012,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
|
|||||||
// This means that we ideally want to keep only most recent 900 epochs in here
|
// This means that we ideally want to keep only most recent 900 epochs in here
|
||||||
// Golang's map iteration starts at a random point in a map.
|
// Golang's map iteration starts at a random point in a map.
|
||||||
// With 5 tries per epoch, and 900 entries to keep, on average we will have
|
// With 5 tries per epoch, and 900 entries to keep, on average we will have
|
||||||
// ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
|
// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
|
||||||
// Seems good enough to me
|
// Seems good enough to me
|
||||||
|
|
||||||
for height := range cs.tipsets {
|
for height := range cs.tipsets {
|
||||||
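As a quick check of the arithmetic in that comment (our working, not part of the diff):

$1-\left(1-\frac{x}{900+x}\right)^{5}=\frac{1}{2}\;\Rightarrow\;\frac{x}{900+x}=1-2^{-1/5}\approx 0.1294\;\Rightarrow\;x\approx\frac{0.1294\cdot 900}{0.8706}\approx 134,$

which lands in the same ballpark as the ~136 the comment cites.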
@ -975,6 +1027,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
 	return nil
 }
 
+// PersistTipsets writes the provided blocks and the TipSetKey objects to the blockstore
 func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
 	toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
 	tsBlks := make([]block.Block, 0, len(tipsets))
@ -1027,44 +1080,72 @@ func (cs *ChainStore) persistBlockHeaders(ctx context.Context, b ...*types.Block
 	return err
 }
 
-func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
-	// Hold lock for the whole function for now, if it becomes a problem we can
-	// fix pretty easily
+// FormHeaviestTipSetForHeight looks up all valid blocks at a given height, and returns the heaviest tipset that can be made at that height
+// It does not consider ANY blocks from miners that have "equivocated" (produced 2 blocks at the same height)
+func (cs *ChainStore) FormHeaviestTipSetForHeight(ctx context.Context, height abi.ChainEpoch) (*types.TipSet, types.BigInt, error) {
 	cs.tstLk.Lock()
 	defer cs.tstLk.Unlock()
 
-	all := []*types.BlockHeader{b}
-
-	tsets, ok := cs.tipsets[b.Height]
+	blockCids, ok := cs.tipsets[height]
 	if !ok {
-		return types.NewTipSet(all)
+		return nil, types.NewInt(0), nil
 	}
 
-	inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
-	for _, bhc := range tsets {
-		if bhc == b.Cid() {
-			continue
-		}
-
+	// First, identify "bad" miners for the height
+	seenMiners := map[address.Address]struct{}{}
+	badMiners := map[address.Address]struct{}{}
+	blocks := make([]*types.BlockHeader, 0, len(blockCids))
+	for _, bhc := range blockCids {
 		h, err := cs.GetBlock(ctx, bhc)
 		if err != nil {
-			return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
+			return nil, types.NewInt(0), xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
 		}
 
-		if cid, found := inclMiners[h.Miner]; found {
-			log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
+		if _, seen := seenMiners[h.Miner]; seen {
+			badMiners[h.Miner] = struct{}{}
 			continue
 		}
+		seenMiners[h.Miner] = struct{}{}
+		blocks = append(blocks, h)
+	}
 
-		if types.CidArrsEqual(h.Parents, b.Parents) {
-			all = append(all, h)
-			inclMiners[h.Miner] = bhc
+	// Next, group by parent tipset
+	formableTipsets := make(map[types.TipSetKey][]*types.BlockHeader, 0)
+	for _, h := range blocks {
+		if _, bad := badMiners[h.Miner]; bad {
+			continue
+		}
+		ptsk := types.NewTipSetKey(h.Parents...)
+		formableTipsets[ptsk] = append(formableTipsets[ptsk], h)
+	}
+
+	maxWeight := types.NewInt(0)
+	var maxTs *types.TipSet
+	for _, headers := range formableTipsets {
+		ts, err := types.NewTipSet(headers)
+		if err != nil {
+			return nil, types.NewInt(0), xerrors.Errorf("unexpected error forming tipset: %w", err)
+		}
+
+		weight, err := cs.Weight(ctx, ts)
+		if err != nil {
+			return nil, types.NewInt(0), xerrors.Errorf("failed to calculate weight: %w", err)
+		}
+
+		heavier := weight.GreaterThan(maxWeight)
+		if weight.Equals(maxWeight) {
+			heavier = breakWeightTie(ts, maxTs)
+		}
+
+		if heavier {
+			maxWeight = weight
+			maxTs = ts
 		}
 	}
 
-	// TODO: other validation...?
-
-	return types.NewTipSet(all)
+	return maxTs, maxWeight, nil
 }
 
 func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
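The exclusion rule here is worth underlining: an equivocating miner loses all of its blocks at that height, including the first one seen, not merely the duplicates. A toy, self-contained restatement of just that filter (our code, mirroring the seenMiners/badMiners logic above):

	// filterEquivocators drops every block belonging to a miner that produced
	// more than one block at this height.
	func filterEquivocators(headers []*types.BlockHeader) []*types.BlockHeader {
		seen := map[address.Address]struct{}{}
		bad := map[address.Address]struct{}{}
		for _, h := range headers {
			if _, ok := seen[h.Miner]; ok {
				bad[h.Miner] = struct{}{} // second sighting: miner is equivocating
			}
			seen[h.Miner] = struct{}{}
		}
		var out []*types.BlockHeader
		for _, h := range headers {
			if _, isBad := bad[h.Miner]; !isBad {
				out = append(out, h)
			}
		}
		return out
	}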
@ -10,6 +10,7 @@ import (
 	"github.com/ipfs/go-datastore"
 	"github.com/stretchr/testify/require"
 
+	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/crypto"
 
@ -238,3 +239,171 @@ func TestChainExportImportFull(t *testing.T) {
 		}
 	}
 }
+
+func TestEquivocations(t *testing.T) {
+	ctx := context.Background()
+	cg, err := gen.NewGenerator()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var last *types.TipSet
+	for i := 0; i < 10; i++ {
+		ts, err := cg.NextTipSet()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		last = ts.TipSet.TipSet()
+	}
+
+	mTs, err := cg.NextTipSetFromMiners(last, []address.Address{last.Blocks()[0].Miner}, 0)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(mTs.TipSet.TipSet().Cids()))
+	last = mTs.TipSet.TipSet()
+
+	require.NotEmpty(t, last.Blocks())
+	blk1 := *last.Blocks()[0]
+
+	// quick check: asking to form tipset at latest height just returns head
+	bestHead, bestHeadWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+	require.NoError(t, err)
+	require.Equal(t, last.Key(), bestHead.Key())
+	require.Contains(t, last.Cids(), blk1.Cid())
+	expectedWeight, err := cg.ChainStore().Weight(ctx, bestHead)
+	require.NoError(t, err)
+	require.Equal(t, expectedWeight, bestHeadWeight)
+
+	// add another block by a different miner -- it should get included in the best tipset
+	blk2 := blk1
+	blk1Miner, err := address.IDFromAddress(blk2.Miner)
+	require.NoError(t, err)
+	blk2.Miner, err = address.NewIDAddress(blk1Miner + 50)
+	require.NoError(t, err)
+	addBlockToTracker(t, cg.ChainStore(), &blk2)
+
+	bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+	require.NoError(t, err)
+	for _, blkCid := range last.Cids() {
+		require.Contains(t, bestHead.Cids(), blkCid)
+	}
+	require.Contains(t, bestHead.Cids(), blk2.Cid())
+	expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+	require.NoError(t, err)
+	require.Equal(t, expectedWeight, bestHeadWeight)
+
+	// add another block by a different miner, but on a different tipset -- it should NOT get included
+	blk3 := blk1
+	blk3.Miner, err = address.NewIDAddress(blk1Miner + 100)
+	require.NoError(t, err)
+	blk1Parent, err := cg.ChainStore().GetBlock(ctx, blk3.Parents[0])
+	require.NoError(t, err)
+	blk3.Parents = blk1Parent.Parents
+	addBlockToTracker(t, cg.ChainStore(), &blk3)
+
+	bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+	require.NoError(t, err)
+	for _, blkCid := range last.Cids() {
+		require.Contains(t, bestHead.Cids(), blkCid)
+	}
+	require.Contains(t, bestHead.Cids(), blk2.Cid())
+	require.NotContains(t, bestHead.Cids(), blk3.Cid())
+	expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+	require.NoError(t, err)
+	require.Equal(t, expectedWeight, bestHeadWeight)
+
+	// add another block by the same miner as blk1 -- it should NOT get included, and blk1 should be excluded too
+	blk4 := blk1
+	blk4.Timestamp = blk1.Timestamp + 1
+	addBlockToTracker(t, cg.ChainStore(), &blk4)
+
+	bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+	require.NoError(t, err)
+	for _, blkCid := range last.Cids() {
+		if blkCid != blk1.Cid() {
+			require.Contains(t, bestHead.Cids(), blkCid)
+		}
+	}
+	require.NotContains(t, bestHead.Cids(), blk4.Cid())
+	require.NotContains(t, bestHead.Cids(), blk1.Cid())
+	expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+	require.NoError(t, err)
+	require.Equal(t, expectedWeight, bestHeadWeight)
+
+	// check that after all of that, the chainstore's head has NOT changed
+	require.Equal(t, last.Key(), cg.ChainStore().GetHeaviestTipSet().Key())
+
+	// NOW, after all that, notify the chainstore to refresh its head
+	require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+
+	originalHead := *last
+	newHead := cg.ChainStore().GetHeaviestTipSet()
+	// the newHead should be at the same height as the originalHead
+	require.Equal(t, originalHead.Height(), newHead.Height())
+	// the newHead should NOT be the same as the originalHead
+	require.NotEqual(t, originalHead.Key(), newHead.Key())
+	// specifically, it should not contain any blocks by blk1Miner
+	for _, b := range newHead.Blocks() {
+		require.NotEqual(t, blk1.Miner, b.Miner)
+	}
+
+	// now have blk2's Miner equivocate too! this causes us to switch to a tipset with a different parent!
+	blk5 := blk2
+	blk5.Timestamp = blk5.Timestamp + 1
+	addBlockToTracker(t, cg.ChainStore(), &blk5)
+
+	// notify the chainstore to refresh its head
+	require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+	newHead = cg.ChainStore().GetHeaviestTipSet()
+	// the newHead should still be at the same height as the originalHead
+	require.Equal(t, originalHead.Height(), newHead.Height())
+	// BUT it should no longer have the same parents -- only blk3's miner is good, and they mined on a different tipset
+	require.Equal(t, 1, len(newHead.Blocks()))
+	require.Equal(t, blk3.Cid(), newHead.Cids()[0])
+	require.NotEqual(t, originalHead.Parents(), newHead.Parents())
+
+	// now have blk3's Miner equivocate too! this causes us to switch to a previous epoch entirely :(
+	blk6 := blk3
+	blk6.Timestamp = blk6.Timestamp + 1
+	addBlockToTracker(t, cg.ChainStore(), &blk6)
+
+	// trying to form a tipset at our previous height leads to emptiness
+	tryTs, tryTsWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blk1.Height)
+	require.NoError(t, err)
+	require.Nil(t, tryTs)
+	require.True(t, tryTsWeight.IsZero())
+
+	// notify the chainstore to refresh its head
+	require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+	newHead = cg.ChainStore().GetHeaviestTipSet()
+	// the newHead should now be one epoch behind originalHead
+	require.Greater(t, originalHead.Height(), newHead.Height())
+
+	// next, we create a new tipset with only one block after many null rounds
+	headAfterNulls, err := cg.NextTipSetFromMiners(newHead, []address.Address{newHead.Blocks()[0].Miner}, 15)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(headAfterNulls.TipSet.Blocks))
+
+	// now, we disqualify the block in this tipset because of equivocation
+	blkAfterNulls := headAfterNulls.TipSet.TipSet().Blocks()[0]
+	equivocatedBlkAfterNulls := *blkAfterNulls
+	equivocatedBlkAfterNulls.Timestamp = blkAfterNulls.Timestamp + 1
+	addBlockToTracker(t, cg.ChainStore(), &equivocatedBlkAfterNulls)
+
+	// try to form a tipset at this height -- it should be empty
+	tryTs2, tryTsWeight2, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blkAfterNulls.Height)
+	require.NoError(t, err)
+	require.Nil(t, tryTs2)
+	require.True(t, tryTsWeight2.IsZero())
+
+	// now we "notify" at this height -- it should lead to no head change because there's no formable head in near epochs
+	require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blkAfterNulls.Height))
+	require.True(t, headAfterNulls.TipSet.TipSet().Equals(cg.ChainStore().GetHeaviestTipSet()))
+}
+
+func addBlockToTracker(t *testing.T, cs *store.ChainStore, blk *types.BlockHeader) {
+	blk2Ts, err := types.NewTipSet([]*types.BlockHeader{blk})
+	require.NoError(t, err)
+	require.NoError(t, cs.PersistTipsets(context.TODO(), []*types.TipSet{blk2Ts}))
+	require.NoError(t, cs.AddToTipSetTracker(context.TODO(), blk))
+}
@ -350,6 +350,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
 	)
 	recordFailure(ctx, metrics.MessageValidationFailure, "add")
 	switch {
+
 	case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
 		fallthrough
 	case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
@ -362,8 +363,17 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
 		fallthrough
 	case xerrors.Is(err, messagepool.ErrNonceTooLow):
 		fallthrough
+	case xerrors.Is(err, messagepool.ErrNotEnoughFunds):
+		fallthrough
 	case xerrors.Is(err, messagepool.ErrExistingNonce):
 		return pubsub.ValidationIgnore
+
+	case xerrors.Is(err, messagepool.ErrMessageTooBig):
+		fallthrough
+	case xerrors.Is(err, messagepool.ErrMessageValueTooHigh):
+		fallthrough
+	case xerrors.Is(err, messagepool.ErrInvalidToAddr):
+		fallthrough
 	default:
 		return pubsub.ValidationReject
 	}
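A note on the split this switch encodes: in go-libp2p-pubsub, ValidationIgnore drops the message without penalizing the sender (appropriate for conditions an honest, merely out-of-sync peer could hit, like a low nonce or insufficient funds), while ValidationReject also counts against the peer's score (appropriate for malformed or protocol-violating messages). A minimal sketch of that classification idea, illustrative only and not the Lotus code:

	// classify maps a mempool admission error to a gossip verdict:
	// possibly-honest failures are ignored; malformed messages are rejected.
	func classify(err error) pubsub.ValidationResult {
		switch {
		case errors.Is(err, messagepool.ErrNonceTooLow),
			errors.Is(err, messagepool.ErrNotEnoughFunds):
			return pubsub.ValidationIgnore // sender may just be out of sync
		default:
			return pubsub.ValidationReject // malformed: penalize the sender
		}
	}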
@ -519,9 +529,8 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
 
 	msgCid := idxrMsg.Cid
 
-	var msgInfo *peerMsgInfo
-	msgInfo, ok := v.peerCache.Get(minerAddr)
-	if !ok {
+	msgInfo, cached := v.peerCache.Get(minerAddr)
+	if !cached {
 		msgInfo = &peerMsgInfo{}
 	}
 
@ -529,17 +538,17 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
 	msgInfo.mutex.Lock()
 	defer msgInfo.mutex.Unlock()
 
-	if ok {
+	var seqno uint64
+	if cached {
 		// Reject replayed messages.
-		seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
+		seqno = binary.BigEndian.Uint64(msg.Message.GetSeqno())
 		if seqno <= msgInfo.lastSeqno {
 			log.Debugf("ignoring replayed indexer message")
 			return pubsub.ValidationIgnore
 		}
-		msgInfo.lastSeqno = seqno
 	}
 
-	if !ok || originPeer != msgInfo.peerID {
+	if !cached || originPeer != msgInfo.peerID {
 		// Check that the miner ID maps to the peer that sent the message.
 		err = v.authenticateMessage(ctx, minerAddr, originPeer)
 		if err != nil {
@ -548,7 +557,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
 			return pubsub.ValidationReject
 		}
 		msgInfo.peerID = originPeer
-		if !ok {
+		if !cached {
 			// Add msgInfo to cache only after being authenticated. If two
 			// messages from the same peer are handled concurrently, there is a
 			// small chance that one msgInfo could replace the other here when
@ -557,6 +566,9 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
 		}
 	}
 
+	// Update message info cache with the latest message's sequence number.
+	msgInfo.lastSeqno = seqno
+
 	// See if message needs to be ignored due to rate limiting.
 	if v.rateLimitPeer(msgInfo, msgCid) {
 		return pubsub.ValidationIgnore
@ -12,10 +12,12 @@ import (
 	"github.com/ipni/go-libipni/announce/message"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 
 	"github.com/filecoin-project/go-address"
 
+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/mocks"
 	"github.com/filecoin-project/lotus/chain/types"
 )
@ -134,3 +136,123 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
 		})
 	}
 }
+
+func TestIdxValidator(t *testing.T) {
+	validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	addr, err := address.NewFromString("f01024")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	buf1, err := addr.MarshalBinary()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	selfPID := "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW"
+	senderPID := "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ"
+	extraData := buf1
+
+	mc := gomock.NewController(t)
+	node := mocks.NewMockFullNode(mc)
+	node.EXPECT().ChainHead(gomock.Any()).Return(nil, nil).AnyTimes()
+
+	subject := NewIndexerMessageValidator(peer.ID(selfPID), node, node)
+	message := message.Message{
+		Cid:       validCid,
+		Addrs:     nil,
+		ExtraData: extraData,
+	}
+	buf := bytes.NewBuffer(nil)
+	if err := message.MarshalCBOR(buf); err != nil {
+		t.Fatal(err)
+	}
+
+	topic := "topic"
+
+	privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id, err := peer.IDFromPublicKey(privk.GetPublic())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	node.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{PeerId: &id}, nil).AnyTimes()
+
+	pbm := &pb.Message{
+		Data:  buf.Bytes(),
+		Topic: &topic,
+		From:  []byte(id),
+		Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 2},
+	}
+	validate := subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+		Message:       pbm,
+		ReceivedFrom:  peer.ID("f01024"), // peer.ID(senderPID),
+		ValidatorData: nil,
+	})
+	if validate != pubsub.ValidationAccept {
+		t.Error("Expected to receive ValidationAccept")
+	}
+	msgInfo, cached := subject.peerCache.Get(addr)
+	if !cached {
+		t.Fatal("Message info should be in cache")
+	}
+	seqno := msgInfo.lastSeqno
+	msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+	t.Log("Sending DoS msg")
+	privk, _, err = crypto.GenerateKeyPair(crypto.RSA, 2048)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := peer.IDFromPublicKey(privk.GetPublic())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pbm = &pb.Message{
+		Data:  buf.Bytes(),
+		Topic: &topic,
+		From:  []byte(id2),
+		Seqno: []byte{255, 255, 255, 255, 255, 255, 255, 255},
+	}
+	validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+		Message:       pbm,
+		ReceivedFrom:  peer.ID(senderPID),
+		ValidatorData: nil,
+	})
+	if validate != pubsub.ValidationReject {
+		t.Error("Expected to get ValidationReject")
+	}
+	msgInfo, cached = subject.peerCache.Get(addr)
+	if !cached {
+		t.Fatal("Message info should be in cache")
+	}
+	msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+	// Check if DoS is possible.
+	if msgInfo.lastSeqno != seqno {
+		t.Fatal("Sequence number should not have been updated")
+	}
+
+	t.Log("Sending another valid message from miner...")
+	pbm = &pb.Message{
+		Data:  buf.Bytes(),
+		Topic: &topic,
+		From:  []byte(id),
+		Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 3},
+	}
+	validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+		Message:       pbm,
+		ReceivedFrom:  peer.ID("f01024"), // peer.ID(senderPID),
+		ValidatorData: nil,
+	})
+	if validate != pubsub.ValidationAccept {
+		t.Fatal("Did not receive ValidationAccept")
+	}
+}
@ -536,7 +536,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
 
 	// At this point we have accepted and synced to the new `maybeHead`
 	// (`StageSyncComplete`).
-	if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
+	if err := syncer.store.RefreshHeaviestTipSet(ctx, maybeHead.Height()); err != nil {
 		span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
 		span.SetStatus(trace.Status{
 			Code: 13,
@ -92,6 +92,7 @@ type syncManager struct {
 var _ SyncManager = (*syncManager)(nil)
 
 type peerHead struct {
+	// Note: this doesn't _necessarily_ mean that p's head is ts, just that ts is a tipset that p sent to us
 	p  peer.ID
 	ts *types.TipSet
 }
@ -311,7 +311,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
 	for _, lastB := range lastTs.Blocks {
 		require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header))
 	}
-	err = cs.PutTipSet(tu.ctx, lastTs.TipSet())
+	err = cs.RefreshHeaviestTipSet(tu.ctx, lastTs.TipSet().Height())
 	require.NoError(tu.t, err)
 
 	tu.genesis = genesis
@ -18,6 +18,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	builtintypes "github.com/filecoin-project/go-state-types/builtin"
 
@ -929,3 +930,57 @@ func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error {
 
 	return errors.New("invalid block param")
 }
+
+type EthTrace struct {
+	Action       EthTraceAction `json:"action"`
+	Result       EthTraceResult `json:"result"`
+	Subtraces    int            `json:"subtraces"`
+	TraceAddress []int          `json:"traceAddress"`
+	Type         string         `json:"Type"`
+
+	Parent *EthTrace `json:"-"`
+
+	// if a subtrace makes a call to GetBytecode, we store a pointer to that subtrace here
+	// which we then lookup when checking for delegatecall (InvokeContractDelegate)
+	LastByteCode *EthTrace `json:"-"`
+}
+
+func (t *EthTrace) SetCallType(callType string) {
+	t.Action.CallType = callType
+	t.Type = callType
+}
+
+type EthTraceBlock struct {
+	*EthTrace
+	BlockHash           EthHash `json:"blockHash"`
+	BlockNumber         int64   `json:"blockNumber"`
+	TransactionHash     EthHash `json:"transactionHash"`
+	TransactionPosition int     `json:"transactionPosition"`
+}
+
+type EthTraceReplayBlockTransaction struct {
+	Output          EthBytes    `json:"output"`
+	StateDiff       *string     `json:"stateDiff"`
+	Trace           []*EthTrace `json:"trace"`
+	TransactionHash EthHash     `json:"transactionHash"`
+	VmTrace         *string     `json:"vmTrace"`
+}
+
+type EthTraceAction struct {
+	CallType string     `json:"callType"`
+	From     EthAddress `json:"from"`
+	To       EthAddress `json:"to"`
+	Gas      EthUint64  `json:"gas"`
+	Input    EthBytes   `json:"input"`
+	Value    EthBigInt  `json:"value"`
+
+	FilecoinMethod  abi.MethodNum   `json:"-"`
+	FilecoinCodeCid cid.Cid         `json:"-"`
+	FilecoinFrom    address.Address `json:"-"`
+	FilecoinTo      address.Address `json:"-"`
+}
+
+type EthTraceResult struct {
+	GasUsed EthUint64 `json:"gasUsed"`
+	Output  EthBytes  `json:"output"`
+}
@ -134,7 +134,7 @@ func decodeRLP(data []byte) (res interface{}, consumed int, err error) {
 		return nil, 0, err
 	}
 	totalLen := 1 + strLenInBytes + strLen
-	if totalLen > len(data) {
+	if totalLen > len(data) || totalLen < 0 {
 		return nil, 0, xerrors.Errorf("invalid rlp data: out of bound while parsing string")
 	}
 	return data[1+strLenInBytes : totalLen], totalLen, nil
@ -157,7 +157,12 @@ func decodeLength(data []byte, lenInBytes int) (length int, err error) {
 	if err := binary.Read(r, binary.BigEndian, &decodedLength); err != nil {
 		return 0, xerrors.Errorf("invalid rlp data: cannot parse string length: %w", err)
 	}
-	if lenInBytes+int(decodedLength) > len(data) {
+	if decodedLength < 0 {
+		return 0, xerrors.Errorf("invalid rlp data: negative string length")
+	}
+
+	totalLength := lenInBytes + int(decodedLength)
+	if totalLength < 0 || totalLength > len(data) {
 		return 0, xerrors.Errorf("invalid rlp data: out of bound while parsing list")
 	}
 	return int(decodedLength), nil
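Why both guards are needed: an attacker-supplied 8-byte length flips negative when read into a signed integer, and even a positive length can wrap the subsequent addition past the maximum int. A standalone sketch of the failure mode (our illustration, with made-up values, not Lotus code):

	package main

	import "fmt"

	func main() {
		// 0xff...ff read big-endian into an int64 is -1.
		decoded := int64(-1)
		fmt.Println(1 + int(decoded)) // 0: a naive "> len(data)" bounds check passes

		// A large positive length can also wrap the addition on its own.
		big := int64(1<<63 - 1) // math.MaxInt64
		fmt.Println(1 + int(big)) // wraps negative on 64-bit, so the check passes again
	}

Either way the unguarded comparison against len(data) succeeds, and the subsequent slice arithmetic goes out of bounds; hence the explicit negative checks on both decodedLength and totalLength.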
@ -143,6 +143,20 @@ func TestDecodeList(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDecodeNegativeLength(t *testing.T) {
|
||||||
|
testcases := [][]byte{
|
||||||
|
mustDecodeHex("0xbfffffffffffffff0041424344"),
|
||||||
|
mustDecodeHex("0xc1bFFF1111111111111111"),
|
||||||
|
mustDecodeHex("0xbFFF11111111111111"),
|
||||||
|
mustDecodeHex("0xbf7fffffffffffffff41424344"),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testcases {
|
||||||
|
_, err := DecodeRLP(tc)
|
||||||
|
require.ErrorContains(t, err, "invalid rlp data")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestDecodeEncodeTx(t *testing.T) {
|
func TestDecodeEncodeTx(t *testing.T) {
|
||||||
testcases := [][]byte{
|
testcases := [][]byte{
|
||||||
mustDecodeHex("0xdc82013a0185012a05f2008504a817c8008080872386f26fc1000000c0"),
|
mustDecodeHex("0xdc82013a0185012a05f2008504a817c8008080872386f26fc1000000c0"),
|
||||||
|
@ -1,11 +1,6 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -38,24 +33,3 @@ type EventEntry struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type FilterID [32]byte // compatible with EthHash
|
type FilterID [32]byte // compatible with EthHash
|
||||||
|
|
||||||
// DecodeEvents decodes a CBOR list of CBOR-encoded events.
|
|
||||||
func DecodeEvents(input []byte) ([]Event, error) {
|
|
||||||
r := bytes.NewReader(input)
|
|
||||||
typ, len, err := cbg.NewCborReader(r).ReadHeader()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read events: %w", err)
|
|
||||||
}
|
|
||||||
if typ != cbg.MajArray {
|
|
||||||
return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
|
|
||||||
}
|
|
||||||
events := make([]Event, 0, len)
|
|
||||||
for i := 0; i < int(len); i++ {
|
|
||||||
var evt Event
|
|
||||||
if err := evt.UnmarshalCBOR(r); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse event: %w", err)
|
|
||||||
}
|
|
||||||
events = append(events, evt)
|
|
||||||
}
|
|
||||||
return events, nil
|
|
||||||
}
|
|
||||||
|
@ -468,7 +468,7 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet
|
|||||||
}
|
}
|
||||||
|
|
||||||
if vm.returnEvents && len(ret.EventsBytes) > 0 {
|
if vm.returnEvents && len(ret.EventsBytes) > 0 {
|
||||||
applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
|
applyRet.Events, err = decodeEvents(ret.EventsBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
|
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
|
||||||
}
|
}
|
||||||
@ -524,7 +524,7 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*
|
|||||||
}
|
}
|
||||||
|
|
||||||
if vm.returnEvents && len(ret.EventsBytes) > 0 {
|
if vm.returnEvents && len(ret.EventsBytes) > 0 {
|
||||||
applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
|
applyRet.Events, err = decodeEvents(ret.EventsBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
|
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
|
||||||
}
|
}
|
||||||
|
39
chain/vm/fvm_util.go
Normal file
@ -0,0 +1,39 @@
+package vm
+
+import (
+	"bytes"
+	"fmt"
+
+	cbg "github.com/whyrusleeping/cbor-gen"
+
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+const MaxEventSliceLength = 6_000_000
+
+// decodeEvents decodes a CBOR list of CBOR-encoded events.
+func decodeEvents(input []byte) ([]types.Event, error) {
+	r := bytes.NewReader(input)
+	typ, length, err := cbg.NewCborReader(r).ReadHeader()
+	if err != nil {
+		return nil, fmt.Errorf("failed to read events: %w", err)
+	}
+
+	if length > MaxEventSliceLength {
+		log.Errorf("extremely long event slice (len %d) returned, not decoding", length)
+		return nil, nil
+	}
+
+	if typ != cbg.MajArray {
+		return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
+	}
+	events := make([]types.Event, 0, length)
+	for i := 0; i < int(length); i++ {
+		var evt types.Event
+		if err := evt.UnmarshalCBOR(r); err != nil {
+			return nil, fmt.Errorf("failed to parse event: %w", err)
+		}
+		events = append(events, evt)
+	}
+	return events, nil
+}
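Editor's note on the new helper: reading the CBOR array header before allocating lets decodeEvents reject an absurd claimed length instead of preallocating a huge slice from untrusted FVM output. A minimal standalone sketch of the same pattern (the boundedHeader helper and maxLen parameter are illustrative, not part of this change):

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// boundedHeader reads a CBOR array header and refuses lengths above
// maxLen, mirroring the MaxEventSliceLength guard in fvm_util.go.
func boundedHeader(input []byte, maxLen uint64) (uint64, error) {
	typ, length, err := cbg.NewCborReader(bytes.NewReader(input)).ReadHeader()
	if err != nil {
		return 0, err
	}
	if typ != cbg.MajArray {
		return 0, fmt.Errorf("expected a CBOR list, was major type %d", typ)
	}
	if length > maxLen {
		return 0, fmt.Errorf("list too long: %d", length)
	}
	return length, nil
}

func main() {
	// 0x83 is the CBOR header byte for a 3-element array.
	n, err := boundedHeader([]byte{0x83}, 10)
	fmt.Println(n, err) // 3 <nil>
}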
@ -70,11 +70,6 @@ type syscallShim struct {
 }
 
 func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
-	var sum abi.PaddedPieceSize
-	for _, p := range pieces {
-		sum += p.Size
-	}
-
 	commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces)
 	if err != nil {
 		log.Errorf("generate data commitment failed: %s", err)
@ -11,7 +11,7 @@ import (
 	"github.com/ipfs/go-cid"
 	cbor "github.com/ipfs/go-ipld-cbor"
 	logging "github.com/ipfs/go-log/v2"
-	mh "github.com/multiformats/go-multihash"
+	"github.com/multiformats/go-multicodec"
 	cbg "github.com/whyrusleeping/cbor-gen"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/trace"
@ -38,7 +38,6 @@ import (
 )
 
 const MaxCallDepth = 4096
-const CborCodec = 0x51
 
 var (
 	log = logging.Logger("vm")
@ -128,7 +127,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error {
 func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
 	paramsCodec := uint64(0)
 	if len(msg.Params) > 0 {
-		paramsCodec = CborCodec
+		paramsCodec = uint64(multicodec.Cbor)
 	}
 	rt := &Runtime{
 		ctx: ctx,
@ -380,7 +379,7 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
 
 	retCodec := uint64(0)
 	if len(ret) > 0 {
-		retCodec = CborCodec
+		retCodec = uint64(multicodec.Cbor)
 	}
 	rt.executionTrace.MsgRct = types.ReturnTrace{
 		ExitCode: aerrors.RetCode(err),
@ -695,15 +694,15 @@ func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store {
 }
 
 func linksForObj(blk block.Block, cb func(cid.Cid)) error {
-	switch blk.Cid().Prefix().Codec {
-	case cid.DagCBOR:
+	switch multicodec.Code(blk.Cid().Prefix().Codec) {
+	case multicodec.DagCbor:
 		err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb)
 		if err != nil {
 			return xerrors.Errorf("cbg.ScanForLinks: %w", err)
 		}
 		return nil
-	case cid.Raw:
-		// We implicitly have all children of raw blocks.
+	case multicodec.Raw, multicodec.Cbor:
+		// We implicitly have all children of raw/cbor blocks.
 		return nil
 	default:
 		return xerrors.Errorf("vm flush copy method only supports dag cbor")
@ -803,14 +802,17 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid,
 		}
 
 		prefix := link.Prefix()
-		if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed {
+		codec := multicodec.Code(prefix.Codec)
+		switch codec {
+		case multicodec.FilCommitmentSealed, multicodec.FilCommitmentUnsealed:
 			return
 		}
 
 		// We always have blocks inlined into CIDs, but we may not have their children.
-		if prefix.MhType == mh.IDENTITY {
+		if multicodec.Code(prefix.MhType) == multicodec.Identity {
 			// Unless the inlined block has no children.
-			if prefix.Codec == cid.Raw {
+			switch codec {
+			case multicodec.Raw, multicodec.Cbor:
 				return
 			}
 		} else {
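Editor's note: the two hunks above replace numeric codec comparisons with named multicodec constants. A small standalone sketch of the pattern (the describeCodec helper is illustrative, not part of the commit):

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multicodec"
)

// describeCodec converts the raw uint64 codec from a CID prefix into a
// multicodec.Code and switches on named constants instead of magic
// numbers such as the former CborCodec = 0x51.
func describeCodec(c cid.Cid) string {
	switch multicodec.Code(c.Prefix().Codec) {
	case multicodec.DagCbor:
		return "dag-cbor"
	case multicodec.Raw, multicodec.Cbor:
		return "raw or plain cbor"
	default:
		return fmt.Sprintf("other codec (%d)", c.Prefix().Codec)
	}
}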
19
cli/sync.go
@ -273,11 +273,6 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
 			continue
 		}
 
-		head, err := napi.ChainHead(ctx)
-		if err != nil {
-			return err
-		}
-
 		working := -1
 		for i, ss := range state.ActiveSyncs {
 			switch ss.Stage {
@ -332,7 +327,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
 
 		_ = target // todo: maybe print? (creates a bunch of line wrapping issues with most tipsets)
 
-		if !watch && time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
+		isDone, err := IsSyncDone(ctx, napi)
+		if err != nil {
+			return err
+		}
+		if !watch && isDone {
 			fmt.Println("\nDone!")
 			return nil
 		}
@ -347,3 +346,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
 		i++
 	}
 }
+
+func IsSyncDone(ctx context.Context, napi v0api.FullNode) (bool, error) {
+	head, err := napi.ChainHead(ctx)
+	if err != nil {
+		return false, err
+	}
+	return time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs), nil
+}
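Editor's note: IsSyncDone extracts the "head is within one block delay of wall-clock time" heuristic so other commands can reuse it. A hypothetical caller sketch (function name and setup are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api/v0api"
	lcli "github.com/filecoin-project/lotus/cli"
)

// warnIfSyncing checks the new exported helper and warns when the
// node's chain head still lags wall-clock time.
func warnIfSyncing(ctx context.Context, napi v0api.FullNode) error {
	done, err := lcli.IsSyncDone(ctx, napi)
	if err != nil {
		return err
	}
	if !done {
		fmt.Println("chain still syncing; results may be stale")
	}
	return nil
}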
@ -7,7 +7,9 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"os/signal"
 	"strings"
+	"syscall"
 
 	"github.com/urfave/cli/v2"
 	"golang.org/x/term"
@ -206,7 +208,12 @@ var walletBalance = &cli.Command{
 			return err
 		}
 
-		if balance.Equals(types.NewInt(0)) {
+		inSync, err := IsSyncDone(ctx, api)
+		if err != nil {
+			return err
+		}
+
+		if balance.Equals(types.NewInt(0)) && !inSync {
 			afmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
 		} else {
 			afmt.Printf("%s\n", types.FIL(balance))
@ -330,6 +337,17 @@ var walletImport = &cli.Command{
 		if !cctx.Args().Present() || cctx.Args().First() == "-" {
 			if term.IsTerminal(int(os.Stdin.Fd())) {
 				fmt.Print("Enter private key(not display in the terminal): ")
+
+				sigCh := make(chan os.Signal, 1)
+				// Notify the channel when SIGINT is received
+				signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
+
+				go func() {
+					<-sigCh
+					fmt.Println("\nInterrupt signal received. Exiting...")
+					os.Exit(1)
+				}()
+
 				inpdata, err = term.ReadPassword(int(os.Stdin.Fd()))
 				if err != nil {
 					return err
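Editor's note: while term.ReadPassword has the terminal in password mode, default interrupt handling can kill the process without a clean message; the hunk above installs an explicit handler. A condensed sketch of the same pattern (readSecret is illustrative, not the commit's code):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"golang.org/x/term"
)

// readSecret prompts without echo and exits with a message if the
// process is interrupted mid-prompt.
func readSecret(prompt string) ([]byte, error) {
	fmt.Print(prompt)

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigCh)

	go func() {
		<-sigCh
		fmt.Println("\nInterrupted.")
		os.Exit(1)
	}()

	return term.ReadPassword(int(os.Stdin.Fd()))
}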
|
@ -21,6 +21,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWalletNew(t *testing.T) {
|
func TestWalletNew(t *testing.T) {
|
||||||
@ -133,6 +134,11 @@ func TestWalletBalance(t *testing.T) {
|
|||||||
|
|
||||||
balance := big.NewInt(1234)
|
balance := big.NewInt(1234)
|
||||||
|
|
||||||
|
// add blocks to the chain
|
||||||
|
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||||
|
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||||
|
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(head, nil)
|
||||||
mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)
|
mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)
|
||||||
|
|
||||||
//stm: @CLI_WALLET_BALANCE_001
|
//stm: @CLI_WALLET_BALANCE_001
|
||||||
312
cmd/lotus-bench/amt_internal.go
Normal file
@ -0,0 +1,312 @@
+// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal
+// which for some reason is a go internal package and therefore cannot be imported
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"sort"
+
+	cid "github.com/ipfs/go-cid"
+	cbg "github.com/whyrusleeping/cbor-gen"
+	xerrors "golang.org/x/xerrors"
+)
+
+type AMTRoot struct {
+	BitWidth uint64
+	Height   uint64
+	Count    uint64
+	AMTNode  AMTNode
+}
+
+type AMTNode struct {
+	Bmap   []byte
+	Links  []cid.Cid
+	Values []*cbg.Deferred
+}
+
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = math.E
+var _ = sort.Sort
+
+var lengthBufAMTRoot = []byte{132}
+
+func (t *AMTRoot) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufAMTRoot); err != nil {
+		return err
+	}
+
+	// t.BitWidth (uint64) (uint64)
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil {
+		return err
+	}
+
+	// t.Height (uint64) (uint64)
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil {
+		return err
+	}
+
+	// t.Count (uint64) (uint64)
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil {
+		return err
+	}
+
+	// t.AMTNode (internal.AMTNode) (struct)
+	if err := t.AMTNode.MarshalCBOR(cw); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = AMTRoot{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajArray {
+		return fmt.Errorf("cbor input should be of type array")
+	}
+
+	if extra != 4 {
+		return fmt.Errorf("cbor input had wrong number of fields")
+	}
+
+	// t.BitWidth (uint64) (uint64)
+
+	{
+
+		maj, extra, err = cr.ReadHeader()
+		if err != nil {
+			return err
+		}
+		if maj != cbg.MajUnsignedInt {
+			return fmt.Errorf("wrong type for uint64 field")
+		}
+		t.BitWidth = extra
+
+	}
+	// t.Height (uint64) (uint64)
+
+	{
+
+		maj, extra, err = cr.ReadHeader()
+		if err != nil {
+			return err
+		}
+		if maj != cbg.MajUnsignedInt {
+			return fmt.Errorf("wrong type for uint64 field")
+		}
+		t.Height = extra
+
+	}
+	// t.Count (uint64) (uint64)
+
+	{
+
+		maj, extra, err = cr.ReadHeader()
+		if err != nil {
+			return err
+		}
+		if maj != cbg.MajUnsignedInt {
+			return fmt.Errorf("wrong type for uint64 field")
+		}
+		t.Count = extra
+
+	}
+	// t.AMTNode (internal.AMTNode) (struct)
+
+	{
+
+		if err := t.AMTNode.UnmarshalCBOR(cr); err != nil {
+			return xerrors.Errorf("unmarshaling t.AMTNode: %w", err)
+		}
+
+	}
+	return nil
+}
+
+var lengthBufAMTNode = []byte{131}
+
+func (t *AMTNode) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufAMTNode); err != nil {
+		return err
+	}
+
+	// t.Bmap ([]uint8) (slice)
+	if len(t.Bmap) > cbg.ByteArrayMaxLen {
+		return xerrors.Errorf("Byte array in field t.Bmap was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil {
+		return err
+	}
+
+	if _, err := cw.Write(t.Bmap[:]); err != nil {
+		return err
+	}
+
+	// t.Links ([]cid.Cid) (slice)
+	if len(t.Links) > cbg.MaxLength {
+		return xerrors.Errorf("Slice value in field t.Links was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil {
+		return err
+	}
+	for _, v := range t.Links {
+		if err := cbg.WriteCid(w, v); err != nil {
+			return xerrors.Errorf("failed writing cid field t.Links: %w", err)
+		}
+	}
+
+	// t.Values ([]*typegen.Deferred) (slice)
+	if len(t.Values) > cbg.MaxLength {
+		return xerrors.Errorf("Slice value in field t.Values was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil {
+		return err
+	}
+	for _, v := range t.Values {
+		if err := v.MarshalCBOR(cw); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = AMTNode{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajArray {
+		return fmt.Errorf("cbor input should be of type array")
+	}
+
+	if extra != 3 {
+		return fmt.Errorf("cbor input had wrong number of fields")
+	}
+
+	// t.Bmap ([]uint8) (slice)
+
+	maj, extra, err = cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+
+	if extra > cbg.ByteArrayMaxLen {
+		return fmt.Errorf("t.Bmap: byte array too large (%d)", extra)
+	}
+	if maj != cbg.MajByteString {
+		return fmt.Errorf("expected byte array")
+	}
+
+	if extra > 0 {
+		t.Bmap = make([]uint8, extra)
+	}
+
+	if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil {
+		return err
+	}
+	// t.Links ([]cid.Cid) (slice)
+
+	maj, extra, err = cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("t.Links: array too large (%d)", extra)
+	}
+
+	if maj != cbg.MajArray {
+		return fmt.Errorf("expected cbor array")
+	}
+
+	if extra > 0 {
+		t.Links = make([]cid.Cid, extra)
+	}
+
+	for i := 0; i < int(extra); i++ {
+
+		c, err := cbg.ReadCid(cr)
+		if err != nil {
+			return xerrors.Errorf("reading cid field t.Links failed: %w", err)
+		}
+		t.Links[i] = c
+	}
+
+	// t.Values ([]*typegen.Deferred) (slice)
+
+	maj, extra, err = cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("t.Values: array too large (%d)", extra)
+	}
+
+	if maj != cbg.MajArray {
+		return fmt.Errorf("expected cbor array")
+	}
+
+	if extra > 0 {
+		t.Values = make([]*cbg.Deferred, extra)
+	}
+
+	for i := 0; i < int(extra); i++ {
+
+		var v cbg.Deferred
+		if err := v.UnmarshalCBOR(cr); err != nil {
+			return err
+		}
+
+		t.Values[i] = &v
+	}
+
+	return nil
+}
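Editor's note: the file above is cbor-gen output copied verbatim from go-amt-ipld. A quick hedged sanity check one could compile alongside it in package main (illustrative, not part of the commit; assumes AMTNode is in scope from amt_internal.go):

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// amtNodeRoundTrip verifies a marshal/unmarshal round trip for a node
// holding one deferred value (0xf6 is the CBOR encoding of null).
func amtNodeRoundTrip() error {
	in := AMTNode{
		Bmap:   []byte{0x01},
		Values: []*cbg.Deferred{{Raw: []byte{0xf6}}},
	}
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		return err
	}
	var out AMTNode
	if err := out.UnmarshalCBOR(&buf); err != nil {
		return err
	}
	if len(out.Values) != 1 || len(out.Bmap) != 1 {
		return fmt.Errorf("round trip mismatch: %d values, bmap %x", len(out.Values), out.Bmap)
	}
	return nil
}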
@ -1,6 +1,7 @@
 package main
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
@ -8,9 +9,16 @@ import (
 	"math/rand"
 	"os"
 	"path/filepath"
+	"sync"
 	"time"
 
 	"github.com/docker/go-units"
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/ipld/merkledag"
+	"github.com/ipfs/go-cid"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	cbor "github.com/ipfs/go-ipld-cbor"
+	format "github.com/ipfs/go-ipld-format"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/minio/blake2b-simd"
 	"github.com/mitchellh/go-homedir"
@ -20,10 +28,14 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-paramfetch"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	prooftypes "github.com/filecoin-project/go-state-types/proof"
+	adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
 
 	lapi "github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
@ -104,6 +116,7 @@ func main() {
 		DisableSliceFlagSeparator: true,
 		Commands: []*cli.Command{
 			proveCmd,
+			amtBenchCmd,
 			sealBenchCmd,
 			simpleCmd,
 			importBenchCmd,
@ -117,6 +130,211 @@ func main() {
 		}
 	}
 }
 
+type amtStatCollector struct {
+	ds   format.NodeGetter
+	walk func(format.Node) ([]*format.Link, error)
+
+	statsLk               sync.Mutex
+	totalAMTLinks         int
+	totalAMTValues        int
+	totalAMTLinkNodes     int
+	totalAMTValueNodes    int
+	totalAMTLinkNodeSize  int
+	totalAMTValueNodeSize int
+}
+
+func (asc *amtStatCollector) String() string {
+	asc.statsLk.Lock()
+	defer asc.statsLk.Unlock()
+
+	str := "\n------------\n"
+	str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks)
+	str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues)
+	str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize)
+	str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize)
+	str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize)
+	return str
+}
+
+func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error {
+	size, err := nd.Size()
+	if err != nil {
+		return err
+	}
+
+	var node AMTNode
+	if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+		// try to deserialize root
+		var root AMTRoot
+		if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+			return err
+		}
+		node = root.AMTNode
+	}
+
+	asc.statsLk.Lock()
+	defer asc.statsLk.Unlock()
+
+	link := len(node.Links) > 0
+	value := len(node.Values) > 0
+
+	if link {
+		asc.totalAMTLinks += len(node.Links)
+		asc.totalAMTLinkNodes++
+		asc.totalAMTLinkNodeSize += int(size)
+	} else if value {
+		asc.totalAMTValues += len(node.Values)
+		asc.totalAMTValueNodes++
+		asc.totalAMTValueNodeSize += int(size)
+	} else {
+		return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData())
+	}
+
+	return nil
+}
+
+func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+	nd, err := asc.ds.Get(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := asc.record(ctx, nd); err != nil {
+		return nil, err
+	}
+
+	return asc.walk(nd)
+}
+
+func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
+	for _, link := range nd.Links() {
+		if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+			continue
+		}
+		out = append(out, link)
+	}
+	return out, nil
+}
+
+var amtBenchCmd = &cli.Command{
+	Name:  "amt",
+	Usage: "Benchmark AMT churn",
+	Flags: []cli.Flag{
+		&cli.IntFlag{
+			Name:  "rounds",
+			Usage: "rounds of churn to measure",
+			Value: 1,
+		},
+		&cli.IntFlag{
+			Name:  "interval",
+			Usage: "AMT idx interval for churning values",
+			Value: 2880,
+		},
+		&cli.IntFlag{
+			Name:  "bitwidth",
+			Usage: "AMT bitwidth",
+			Value: 6,
+		},
+	},
+	Action: func(c *cli.Context) error {
+		bs := blockstore.NewMemory()
+		ctx := c.Context
+		store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
+
+		// Setup in memory blockstore
+		bitwidth := c.Int("bitwidth")
+		array, err := adt.MakeEmptyArray(store, bitwidth)
+		if err != nil {
+			return err
+		}
+
+		// Using motivating empirical example: market actor states AMT
+		// Create 40,000,000 states for realistic workload
+		fmt.Printf("Populating AMT\n")
+		for i := 0; i < 40000000; i++ {
+			if err := array.Set(uint64(i), &market.DealState{
+				SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+				LastUpdatedEpoch: abi.ChainEpoch(-1),
+				SlashEpoch:       -1,
+				VerifiedClaim:    verifreg.AllocationId(i),
+			}); err != nil {
+				return err
+			}
+		}
+
+		r, err := array.Root()
+		if err != nil {
+			return err
+		}
+
+		// Measure ratio of internal / leaf nodes / sizes
+		dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+		asc := &amtStatCollector{
+			ds:   dag,
+			walk: carWalkFunc,
+		}
+
+		fmt.Printf("Measuring AMT\n")
+		seen := cid.NewSet()
+		if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+			return err
+		}
+
+		fmt.Printf("%s\n", asc)
+
+		// Overwrite ids with idx % interval: one epoch of market cron
+		rounds := c.Int("rounds")
+		interval := c.Int("interval")
+
+		fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds)
+		array, err = adt.AsArray(store, r, bitwidth)
+		if err != nil {
+			return err
+		}
+		roots := make([]cid.Cid, rounds)
+		for j := 0; j < rounds; j++ {
+			if j%10 == 0 {
+				fmt.Printf("round: %d\n", j)
+			}
+			for i := j; i < 40000000; i += interval {
+				if i%interval == j {
+					if err := array.Set(uint64(i), &market.DealState{
+						SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+						LastUpdatedEpoch: abi.ChainEpoch(1),
+						SlashEpoch:       -1,
+						VerifiedClaim:    verifreg.AllocationId(i),
+					}); err != nil {
+						return err
+					}
+				}
+			}
+			roots[j], err = array.Root()
+			if err != nil {
+				return err
+			}
+		}
+
+		// Measure churn
+		dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+		asc = &amtStatCollector{
+			ds:   dag,
+			walk: carWalkFunc,
+		}
+
+		fmt.Printf("Measuring %d rounds of churn\n", rounds)
+		for _, r := range roots {
+			if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+				return err
+			}
+		}
+
+		fmt.Printf("%s\n", asc)
+		return nil
+	},
+}
+
 var sealBenchCmd = &cli.Command{
 	Name:  "sealing",
 	Usage: "Benchmark seal and winning post and window post",
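Usage note (flag values illustrative, names as defined above): `lotus-bench amt --rounds 1 --interval 2880 --bitwidth 6` populates a 40M-entry AMT in an in-memory blockstore, walks it once to report link/value node counts and sizes, then reports the byte churn produced by each overwrite round.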
@ -120,6 +120,11 @@ p: pvC0JBrEyUqtIIUvB2UUx/2a24c3Cvnu6AZ0D3IMBYAu...
 
 type benchSectorProvider map[storiface.SectorFileType]string
 
+func (b benchSectorProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+	// there's no copying in this context
+	return b.AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
 func (b benchSectorProvider) AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
 	out := storiface.SectorPaths{
 		ID: id.ID,
42
cmd/lotus-shed/block.go
Normal file
@ -0,0 +1,42 @@
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+
+	"github.com/urfave/cli/v2"
+
+	"github.com/filecoin-project/lotus/chain/types"
+	lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var blockCmd = &cli.Command{
+	Name:      "block",
+	Usage:     "Output decoded block header in readable form",
+	ArgsUsage: "[block header hex]",
+	Action: func(cctx *cli.Context) error {
+		if cctx.NArg() != 1 {
+			return lcli.IncorrectNumArgs(cctx)
+		}
+
+		b, err := hex.DecodeString(cctx.Args().First())
+		if err != nil {
+			return err
+		}
+
+		var blk types.BlockHeader
+		if err := blk.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
+			return err
+		}
+
+		jb, err := json.MarshalIndent(blk, "", " ")
+		if err != nil {
+			return err
+		}
+
+		fmt.Println(string(jb))
+		return nil
+	},
+}
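Usage note: `lotus-shed block <block header hex>` hex-decodes the argument, unmarshals it as a CBOR types.BlockHeader, and prints it as indented JSON; obtaining the hex (for example from a blockstore dump) is left to the operator.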
@ -1,14 +1,24 @@
 package main
 
 import (
+	"encoding/json"
 	"fmt"
+	"os"
 
+	"github.com/ipfs/go-cid"
+	ipldcbor "github.com/ipfs/go-ipld-cbor"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
+	"github.com/filecoin-project/go-state-types/abi"
+	miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
+	"github.com/filecoin-project/go-state-types/builtin/v11/util/adt"
+
+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 )
@ -17,6 +27,245 @@ var cronWcCmd = &cli.Command{
 	Description: "cron stats",
 	Subcommands: []*cli.Command{
 		minerDeadlineCronCountCmd,
+		minerDeadlinePartitionMeasurementCmd,
+	},
+}
+
+type DeadlineRef struct {
+	To     string
+	Height abi.ChainEpoch
+	Gas    json.RawMessage
+}
+
+type DeadlineSummary struct {
+	Partitions      []PartitionSummary
+	PreCommitExpiry PreCommitExpiry
+	VestingDiff     VestingDiff
+}
+
+type PreCommitExpiry struct {
+	Expired []uint64
+}
+
+type VestingDiff struct {
+	PrevTableSize int
+	NewTableSize  int
+}
+
+type PartitionSummary struct {
+	Live   int
+	Dead   int
+	Faulty int
+	Diff   PartitionDiff
+}
+
+type PartitionDiff struct {
+	Faulted   int
+	Recovered int
+	Killed    int
+}
+
+var minerDeadlinePartitionMeasurementCmd = &cli.Command{
+	Name:        "deadline-summary",
+	Description: "",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "json",
+			Usage: "read input as json",
+			Value: true,
+		},
+		&cli.StringFlag{
+			Name:  "tipset",
+			Usage: "specify tipset state to search on (pass comma separated array of cids)",
+		},
+	},
+	Action: func(c *cli.Context) error {
+		// read in values to process
+		if !c.Bool("json") {
+			return xerrors.Errorf("unsupported non json input format")
+		}
+		var refStream []DeadlineRef
+		if err := json.NewDecoder(os.Stdin).Decode(&refStream); err != nil {
+			return xerrors.Errorf("failed to parse input: %w", err)
+		}
+
+		// go from height and sp addr to deadline partition data
+		n, acloser, err := lcli.GetFullNodeAPI(c)
+		if err != nil {
+			return err
+		}
+		defer acloser()
+		ctx := lcli.ReqContext(c)
+
+		bs := ReadOnlyAPIBlockstore{n}
+		adtStore := adt.WrapStore(ctx, ipldcbor.NewCborStore(&bs))
+
+		dSummaries := make([]DeadlineSummary, len(refStream))
+		for j, ref := range refStream {
+			// get miner's deadline
+			tsBefore, err := n.ChainGetTipSetByHeight(ctx, ref.Height, types.EmptyTSK)
+			if err != nil {
+				return xerrors.Errorf("failed to get tipset at epoch: %d: %w", ref.Height, err)
+			}
+			tsAfter, err := n.ChainGetTipSetByHeight(ctx, ref.Height+1, types.EmptyTSK)
+			if err != nil {
+				return xerrors.Errorf("failed to get tipset at epoch %d: %w", ref.Height, err)
+			}
+			addr, err := address.NewFromString(ref.To)
+			if err != nil {
+				return xerrors.Errorf("failed to get address from input string: %w", err)
+			}
+			dline, err := n.StateMinerProvingDeadline(ctx, addr, tsBefore.Key())
+			if err != nil {
+				return xerrors.Errorf("failed to read proving deadline: %w", err)
+			}
+
+			// iterate through all partitions at epoch of processing
+			var pSummaries []PartitionSummary
+			psBefore, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsBefore.Key())
+			if err != nil {
+				return xerrors.Errorf("failed to get partitions: %w", err)
+			}
+			psAfter, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsAfter.Key())
+			if err != nil {
+				return xerrors.Errorf("failed to get partitions: %w", err)
+			}
+			if len(psBefore) != len(psAfter) {
+				return xerrors.Errorf("partition count mismatch between epochs")
+			}
+
+			type partitionCount struct {
+				live       int
+				dead       int
+				faulty     int
+				recovering int
+			}
+			countPartition := func(p api.Partition) (partitionCount, error) {
+				liveSectors, err := p.LiveSectors.All(abi.MaxSectorNumber)
+				if err != nil {
+					return partitionCount{}, xerrors.Errorf("failed to count live sectors in partition: %w", err)
+				}
+				allSectors, err := p.AllSectors.All(abi.MaxSectorNumber)
+				if err != nil {
+					return partitionCount{}, xerrors.Errorf("failed to count all sectors in partition: %w", err)
+				}
+				faultySectors, err := p.FaultySectors.All(abi.MaxSectorNumber)
+				if err != nil {
+					return partitionCount{}, xerrors.Errorf("failed to count faulty sectors in partition: %w", err)
+				}
+				recoveringSectors, err := p.RecoveringSectors.All(abi.MaxSectorNumber)
+				if err != nil {
+					return partitionCount{}, xerrors.Errorf("failed to count recovering sectors in partition: %w", err)
+				}
+
+				return partitionCount{
+					live:       len(liveSectors),
+					dead:       len(allSectors) - len(liveSectors),
+					faulty:     len(faultySectors),
+					recovering: len(recoveringSectors),
+				}, nil
+			}
+
+			countVestingTable := func(table cid.Cid) (int, error) {
+				var vestingTable miner11.VestingFunds
+				if err := adtStore.Get(ctx, table, &vestingTable); err != nil {
+					return 0, err
+				}
+				return len(vestingTable.Funds), nil
+			}
+
+			for i := 0; i < len(psBefore); i++ {
+				cntBefore, err := countPartition(psBefore[i])
+				if err != nil {
+					return err
+				}
+				cntAfter, err := countPartition(psAfter[i])
+				if err != nil {
+					return err
+				}
+				pSummaries = append(pSummaries, PartitionSummary{
+					Live:   cntBefore.live,
+					Dead:   cntBefore.dead,
+					Faulty: cntBefore.faulty,
+					Diff: PartitionDiff{
+						Faulted:   cntAfter.faulty - cntBefore.faulty,
+						Recovered: cntBefore.recovering - cntAfter.recovering,
+						Killed:    cntAfter.dead - cntBefore.dead,
+					},
+				})
+			}
+
+			// Precommit and vesting table data
+			// Before
+			aBefore, err := n.StateGetActor(ctx, addr, tsBefore.Key())
+			if err != nil {
+				return err
+			}
+			var st miner11.State
+			err = adtStore.Get(ctx, aBefore.Head, &st)
+			if err != nil {
+				return err
+			}
+			expiryQArray, err := adt.AsArray(adtStore, st.PreCommittedSectorsCleanUp, miner11.PrecommitCleanUpAmtBitwidth)
+			if err != nil {
+				return err
+			}
+			var sectorsBf bitfield.BitField
+			var accumulator []uint64
+			h := ref.Height
+			if err := expiryQArray.ForEach(&sectorsBf, func(i int64) error {
+				if abi.ChainEpoch(i) > h {
+					return nil
+				}
+				sns, err := sectorsBf.All(abi.MaxSectorNumber)
+				if err != nil {
+					return err
+				}
+				accumulator = append(accumulator, sns...)
+				return nil
+			}); err != nil {
+				return err
+			}
+
+			vestingBefore, err := countVestingTable(st.VestingFunds)
+			if err != nil {
+				return err
+			}
+
+			// After
+			aAfter, err := n.StateGetActor(ctx, addr, tsAfter.Key())
+			if err != nil {
+				return err
+			}
+			var stAfter miner11.State
+			err = adtStore.Get(ctx, aAfter.Head, &stAfter)
+			if err != nil {
+				return err
+			}
+
+			vestingAfter, err := countVestingTable(stAfter.VestingFunds)
+			if err != nil {
+				return err
+			}
+
+			dSummaries[j] = DeadlineSummary{
+				Partitions: pSummaries,
+				PreCommitExpiry: PreCommitExpiry{
+					Expired: accumulator,
+				},
+				VestingDiff: VestingDiff{
+					PrevTableSize: vestingBefore,
+					NewTableSize:  vestingAfter,
+				},
+			}
+
+		}
+
+		// output partition info
+		if err := json.NewEncoder(os.Stdout).Encode(dSummaries); err != nil {
+			return err
+		}
+		return nil
 	},
 }
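Input note for `deadline-summary`: the command reads a JSON array of DeadlineRef records on stdin (`To` is the miner address string, `Height` the epoch to inspect; `Gas` is carried but unused here) and writes the matching array of DeadlineSummary records to stdout.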
@ -219,7 +219,7 @@ func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainE
 		brand = bvals[len(bvals)-1]
 	}
 
-	winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api)
+	winner, err := gen.IsRoundWinner(ctx, round, miner, brand, mbi, api)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to check if we win next round: %w", err)
 	}
@ -16,7 +16,6 @@ import (
 	"github.com/filecoin-project/go-state-types/network"
 
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/beacon"
 	"github.com/filecoin-project/lotus/chain/beacon/drand"
 	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/consensus/filcns"
@ -100,15 +99,11 @@ var gasTraceCmd = &cli.Command{
 		return err
 	}
 
-	dcs := build.DrandConfigSchedule()
-	shd := beacon.Schedule{}
-	for _, dc := range dcs {
-		bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
-		if err != nil {
-			return xerrors.Errorf("creating drand beacon: %w", err)
-		}
-		shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+	shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+	if err != nil {
+		return err
 	}
 
 	cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck
@ -200,14 +195,9 @@ var replayOfflineCmd = &cli.Command{
 		return err
 	}
 
-	dcs := build.DrandConfigSchedule()
-	shd := beacon.Schedule{}
-	for _, dc := range dcs {
-		bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
-		if err != nil {
-			return xerrors.Errorf("creating drand beacon: %w", err)
-		}
-		shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+	shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+	if err != nil {
+		return err
 	}
 
 	cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"database/sql"
 	"fmt"
 	"path"
@ -8,12 +9,18 @@ import (
 	"strings"
 
 	"github.com/mitchellh/go-homedir"
+	"github.com/multiformats/go-varint"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"
 
+	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+	builtintypes "github.com/filecoin-project/go-state-types/builtin"
 	"github.com/filecoin-project/go-state-types/crypto"
+	"github.com/filecoin-project/go-state-types/exitcode"
 
+	lapi "github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/ethtypes"
 	lcli "github.com/filecoin-project/lotus/cli"
 )
@ -31,6 +38,291 @@ var indexesCmd = &cli.Command{
 		withCategory("msgindex", backfillMsgIndexCmd),
 		withCategory("msgindex", pruneMsgIndexCmd),
 		withCategory("txhash", backfillTxHashCmd),
+		withCategory("events", backfillEventsCmd),
 	},
 }
 
+var backfillEventsCmd = &cli.Command{
+	Name:  "backfill-events",
+	Usage: "Backfill the events.db for a number of epochs starting from a specified height",
+	Flags: []cli.Flag{
+		&cli.UintFlag{
+			Name:  "from",
+			Value: 0,
+			Usage: "the tipset height to start backfilling from (0 is head of chain)",
+		},
+		&cli.IntFlag{
+			Name:  "epochs",
+			Value: 2000,
+			Usage: "the number of epochs to backfill",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		srv, err := lcli.GetFullNodeServices(cctx)
+		if err != nil {
+			return err
+		}
+		defer srv.Close() //nolint:errcheck
+
+		api := srv.FullNodeAPI()
+		ctx := lcli.ReqContext(cctx)
+
+		// currTs will be the tipset where we start backfilling from
+		currTs, err := api.ChainHead(ctx)
+		if err != nil {
+			return err
+		}
+		if cctx.IsSet("from") {
+			// we need to fetch the tipset after the epoch being specified since we will need to advance currTs
+			currTs, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key())
+			if err != nil {
+				return err
+			}
+		}
+
+		// advance currTs by one epoch and maintain prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API)
+		prevTs := currTs
+		currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+		if err != nil {
+			return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err)
+		}
+
+		epochs := cctx.Int("epochs")
+
+		basePath, err := homedir.Expand(cctx.String("repo"))
+		if err != nil {
+			return err
+		}
+
+		dbPath := path.Join(basePath, "sqlite", "events.db")
+		db, err := sql.Open("sqlite3", dbPath)
+		if err != nil {
+			return err
+		}
+
+		defer func() {
+			err := db.Close()
+			if err != nil {
+				fmt.Printf("ERROR: closing db: %s", err)
+			}
+		}()
+
+		addressLookups := make(map[abi.ActorID]address.Address)
+
+		resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) {
+			// we only want to match using f4 addresses
+			idAddr, err := address.NewIDAddress(uint64(emitter))
+			if err != nil {
+				return address.Undef, false
+			}
+
+			actor, err := api.StateGetActor(ctx, idAddr, ts.Key())
+			if err != nil || actor.Address == nil {
+				return address.Undef, false
+			}
+
+			// if robust address is not f4 then we won't match against it so bail early
+			if actor.Address.Protocol() != address.Delegated {
+				return address.Undef, false
+			}
+
+			// we have an f4 address, make sure it's assigned by the EAM
+			if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID {
+				return address.Undef, false
+			}
+			return *actor.Address, true
+		}
+
+		isIndexedValue := func(b uint8) bool {
+			// currently we mark the full entry as indexed if either the key
+			// or the value are indexed; in the future we will need finer-grained
+			// management of indices
+			return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0
+		}
+
+		var totalEventsAffected int64
+		var totalEntriesAffected int64
+
+		processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error {
+			tx, err := db.BeginTx(ctx, nil)
+			if err != nil {
+				return fmt.Errorf("failed to start transaction: %w", err)
+			}
+			defer tx.Rollback() //nolint:errcheck
+
+			stmtSelectEvent, err := tx.Prepare("SELECT MAX(id) from event WHERE height=? AND tipset_key=? and tipset_key_cid=? and emitter_addr=? and event_index=? and message_cid=? and message_index=? and reverted=false")
+			if err != nil {
+				return err
+			}
+			stmtEvent, err := tx.Prepare("INSERT INTO event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)")
+			if err != nil {
+				return err
+			}
+			stmtEntry, err := tx.Prepare("INSERT INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)")
+			if err != nil {
+				return err
+			}
+
+			var eventsAffected int64
+			var entriesAffected int64
+
+			// loop over each message receipt and backfill the events
+			for idx, receipt := range receipts {
+				msg := msgs[idx]
+
+				if receipt.ExitCode != exitcode.Ok {
+					continue
+				}
+
+				if receipt.EventsRoot == nil {
+					continue
+				}
+
+				events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot)
+				if err != nil {
+					return fmt.Errorf("failed to load events for tipset %s: %w", currTs, err)
+				}
+
+				for eventIdx, event := range events {
+					addr, found := addressLookups[event.Emitter]
+					if !found {
+						var ok bool
+						addr, ok = resolveFn(ctx, event.Emitter, currTs)
+						if !ok {
+							// not an address we will be able to match against
+							continue
+						}
+						addressLookups[event.Emitter] = addr
+					}
+
+					tsKeyCid, err := currTs.Key().Cid()
+					if err != nil {
+						return fmt.Errorf("failed to get tipset key cid: %w", err)
+					}
+
+					// select the highest event id that exists in database, or null if none exists
+					var entryID sql.NullInt64
+					err = stmtSelectEvent.QueryRow(
+						currTs.Height(),
+						currTs.Key().Bytes(),
+						tsKeyCid.Bytes(),
+						addr.Bytes(),
+						eventIdx,
+						msg.Cid.Bytes(),
+						idx,
+					).Scan(&entryID)
+					if err != nil {
+						return fmt.Errorf("error checking if event exists: %w", err)
+					}
+
+					// we already have this event
+					if entryID.Valid {
+						continue
+					}
+
+					// event does not exist, lets backfill it
+					res, err := tx.Stmt(stmtEvent).Exec(
+						currTs.Height(),      // height
+						currTs.Key().Bytes(), // tipset_key
+						tsKeyCid.Bytes(),     // tipset_key_cid
+						addr.Bytes(),         // emitter_addr
+						eventIdx,             // event_index
+						msg.Cid.Bytes(),      // message_cid
+						idx,                  // message_index
+						false,                // reverted
+					)
+					if err != nil {
+						return fmt.Errorf("error inserting event: %w", err)
+					}
+
+					entryID.Int64, err = res.LastInsertId()
+					if err != nil {
+						return fmt.Errorf("could not get last insert id: %w", err)
+					}
+
+					rowsAffected, err := res.RowsAffected()
+					if err != nil {
+						return fmt.Errorf("could not get rows affected: %w", err)
+					}
+					eventsAffected += rowsAffected
+
+					// backfill the event entries
+					for _, entry := range event.Entries {
+						entryRes, err := tx.Stmt(stmtEntry).Exec(
+							entryID.Int64,               // event_id
+							isIndexedValue(entry.Flags), // indexed
+							[]byte{entry.Flags},         // flags
+							entry.Key,                   // key
+							entry.Codec,                 // codec
+							entry.Value,                 // value
+						)
+						if err != nil {
+							return fmt.Errorf("error inserting entry: %w", err)
+						}
+
+						rowsAffected, err := entryRes.RowsAffected()
+						if err != nil {
+							return fmt.Errorf("could not get rows affected: %w", err)
+						}
+						entriesAffected += rowsAffected
+					}
+				}
+			}
+
+			err = tx.Commit()
+			if err != nil {
+				return fmt.Errorf("failed to commit transaction: %w", err)
+			}
+
+			log.Infof("[%d] backfilling actor events epoch:%d, eventsAffected:%d, entriesAffected:%d", cnt, currTs.Height(), eventsAffected, entriesAffected)
+
+			totalEventsAffected += eventsAffected
+			totalEntriesAffected += entriesAffected
+
+			return nil
+		}
+
+		for i := 0; i < epochs; i++ {
+			select {
+			case <-ctx.Done():
+				return nil
+			default:
+			}
+
+			blockCid := prevTs.Blocks()[0].Cid()
+
+			// get messages for the parent of the previous tipset (which will be currTs)
+			msgs, err := api.ChainGetParentMessages(ctx, blockCid)
+			if err != nil {
+				return fmt.Errorf("failed to get parent messages for block %s: %w", blockCid, err)
+			}
+
+			// get receipts for the parent of the previous tipset (which will be currTs)
+			receipts, err := api.ChainGetParentReceipts(ctx, blockCid)
+			if err != nil {
+				return fmt.Errorf("failed to get parent receipts for block %s: %w", blockCid, err)
+			}
+
+			if len(msgs) != len(receipts) {
+				return fmt.Errorf("mismatched message and receipt count: %d != %d", len(msgs), len(receipts))
+			}
+
+			err = processHeight(ctx, i, msgs, receipts)
+			if err != nil {
+				return err
+			}
+
+			// advance prevTs and currTs up the chain
+			prevTs = currTs
+			currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+			if err != nil {
+				return fmt.Errorf("failed to load tipset %s: %w", currTs, err)
+			}
+		}
+
+		log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected)
+
+		return nil
 	},
 }
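Usage note (height value illustrative): `lotus-shed indexes backfill-events --from 3350000 --epochs 2000` should run against the node's own repo, since the command opens `<repo>/sqlite/events.db` directly while reading messages, receipts, and events over the API.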
@ -24,6 +24,7 @@ func main() {
 	local := []*cli.Command{
 		addressCmd,
 		statActorCmd,
+		statSnapshotCmd,
 		statObjCmd,
 		base64Cmd,
 		base32Cmd,
@ -90,6 +91,7 @@ func main() {
 		indexesCmd,
 		FevmAnalyticsCmd,
 		mismatchesCmd,
+		blockCmd,
 	}
 
 	app := &cli.App{
@ -26,6 +26,12 @@ var msgCmd = &cli.Command{
 	Aliases:   []string{"msg"},
 	Usage:     "Translate message between various formats",
 	ArgsUsage: "Message in any form",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "exec-trace",
+			Usage: "Print the execution trace",
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		if cctx.NArg() != 1 {
 			return lcli.IncorrectNumArgs(cctx)
@ -36,6 +42,48 @@ var msgCmd = &cli.Command{
 			return err
 		}
 
+		api, closer, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := lcli.ReqContext(cctx)
+
+		// Get the CID of the message
+		mcid := msg.Cid()
+
+		// Search for the message on-chain
+		lookup, err := api.StateSearchMsg(ctx, mcid)
+		if err != nil {
+			return err
+		}
+		if lookup == nil {
+			fmt.Println("Message not found on-chain. Continuing...")
+		} else {
+			// Replay the message to get the execution trace
+			res, err := api.StateReplay(ctx, types.EmptyTSK, mcid)
+			if err != nil {
+				return xerrors.Errorf("replay call failed: %w", err)
+			}
+
+			if cctx.Bool("exec-trace") {
+				// Print the execution trace
+				color.Green("Execution trace:")
+				trace, err := json.MarshalIndent(res.ExecutionTrace, "", " ")
+				if err != nil {
+					return xerrors.Errorf("marshaling execution trace: %w", err)
+				}
+				fmt.Println(string(trace))
+				fmt.Println()
+
+				color.Green("Receipt:")
+				fmt.Printf("Exit code: %d\n", res.MsgRct.ExitCode)
+				fmt.Printf("Return: %x\n", res.MsgRct.Return)
+				fmt.Printf("Gas Used: %d\n", res.MsgRct.GasUsed)
+			}
+		}
+
 		switch msg := msg.(type) {
 		case *types.SignedMessage:
 			return printSignedMessage(cctx, msg)
@ -1,11 +1,14 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"path"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
@ -21,8 +24,12 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
gstactors "github.com/filecoin-project/go-state-types/actors"
|
||||||
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/blockstore"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||||
"github.com/filecoin-project/lotus/chain/consensus"
|
"github.com/filecoin-project/lotus/chain/consensus"
|
||||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||||
@ -49,6 +56,19 @@ type fieldItem struct {
|
|||||||
Stats api.ObjStat
|
Stats api.ObjStat
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type job struct {
|
||||||
|
c cid.Cid
|
||||||
|
key string // prefix path for the region being recorded i.e. "/state/mineractor"
|
||||||
|
}
|
||||||
|
type cidCall struct {
|
||||||
|
c cid.Cid
|
||||||
|
resp chan bool
|
||||||
|
}
|
||||||
|
type result struct {
|
||||||
|
key string
|
||||||
|
stats api.ObjStat
|
||||||
|
}
|
||||||
|
|
||||||
type cacheNodeGetter struct {
|
type cacheNodeGetter struct {
|
||||||
ds format.NodeGetter
|
ds format.NodeGetter
|
||||||
cache *lru.TwoQueueCache[cid.Cid, format.Node]
|
cache *lru.TwoQueueCache[cid.Cid, format.Node]
|
||||||
@ -166,39 +186,13 @@ var statObjCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := repo.NewFS(cctx.String("repo"))
|
h, err := loadChainStore(ctx, cctx.String("repo"))
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("opening fs repo: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
exists, err := r.Exists()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !exists {
|
defer h.closer()
|
||||||
return xerrors.Errorf("lotus repo doesn't exist")
|
|
||||||
}
|
|
||||||
|
|
||||||
lr, err := r.Lock(repo.FullNode)
|
dag := merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer lr.Close() //nolint:errcheck
|
|
||||||
|
|
||||||
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if c, ok := bs.(io.Closer); ok {
|
|
||||||
if err := c.Close(); err != nil {
|
|
||||||
log.Warnf("failed to close blockstore: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
|
||||||
dsc := &dagStatCollector{
|
dsc := &dagStatCollector{
|
||||||
ds: dag,
|
ds: dag,
|
||||||
walk: carWalkFunc,
|
walk: carWalkFunc,
|
||||||
@ -212,6 +206,376 @@ var statObjCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type StoreHandle struct {
|
||||||
|
bs blockstore.Blockstore
|
||||||
|
cs *store.ChainStore
|
||||||
|
sm *stmgr.StateManager
|
||||||
|
closer func()
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) {
|
||||||
|
r, err := repo.NewFS(repoPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("opening fs repo: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
exists, err := r.Exists()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
return nil, xerrors.Errorf("lotus repo doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
lr, err := r.Lock(repo.FullNode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to open blockstore: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
closer := func() {
|
||||||
|
if err := lr.Close(); err != nil {
|
||||||
|
log.Warnf("failed to close locked repo: %s", err)
|
||||||
|
}
|
||||||
|
if c, ok := bs.(io.Closer); ok {
|
||||||
|
if err := c.Close(); err != nil {
|
||||||
|
log.Warnf("failed to close blockstore: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mds, err := lr.Datastore(context.Background(), "/metadata")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cs := store.NewChainStore(bs, bs, mds, nil, nil)
|
||||||
|
if err := cs.Load(ctx); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to load chain store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
|
||||||
|
sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to open state manager: %w", err)
|
||||||
|
}
|
||||||
|
handle := StoreHandle{
|
||||||
|
bs: bs,
|
||||||
|
sm: sm,
|
||||||
|
cs: cs,
|
||||||
|
closer: closer,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &handle, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
+func pipeline(ctx context.Context, name string, numWorkers int, createJobs func(ctx context.Context, jobCh chan job, resultCh chan result) error,
+	worker func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error,
+	processResults func(ctx context.Context, resultCh chan result) error) error {
+
+	eg, egctx := errgroup.WithContext(ctx)
+	jobCh := make(chan job, numWorkers)
+	resultCh := make(chan result)
+	var resultWriterWg sync.WaitGroup
+
+	resultWriterWg.Add(1)
+	eg.Go(func() error {
+		defer resultWriterWg.Done()
+		defer func() {
+			close(jobCh)
+		}()
+		return createJobs(ctx, jobCh, resultCh)
+	})
+
+	var id int
+	for w := 0; w < numWorkers; w++ {
+		id = w
+
+		resultWriterWg.Add(1)
+		eg.Go(func() error {
+			defer resultWriterWg.Done()
+			return worker(egctx, id, jobCh, resultCh)
+		})
+	}
+
+	eg.Go(func() error {
+		return processResults(ctx, resultCh)
+	})
+
+	// close result channel when workers are done sending to it.
+	eg.Go(func() error {
+		resultWriterWg.Wait()
+		close(resultCh)
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return fmt.Errorf("failed pipeline %s: %w", name, err)
+	}
+	return nil
+}
+
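The `pipeline` helper just added is a small fan-out/fan-in harness: one producer goroutine, `numWorkers` workers, and a single result consumer, with channel closure owned by the harness. A hypothetical wiring, reusing the `job`/`result` types and `pipeline` signature from this diff (assumes the surrounding file's imports), looks like:

```go
// Hypothetical usage sketch of the pipeline helper above.
func exampleRun(ctx context.Context, roots []cid.Cid) error {
	createJobs := func(ctx context.Context, jobCh chan job, _ chan result) error {
		for _, c := range roots {
			jobCh <- job{c: c, key: "/example"}
		}
		return nil // pipeline closes jobCh once this returns
	}
	worker := func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error {
		for j := range jobCh {
			// a real worker would walk the DAG here
			resultCh <- result{key: j.key, stats: api.ObjStat{Size: 1, Links: 0}}
		}
		return nil
	}
	processResults := func(ctx context.Context, resultCh chan result) error {
		for r := range resultCh {
			fmt.Println(r.key, r.stats.Size)
		}
		return nil
	}
	// 4 workers; resultCh is closed only after producer and workers finish
	return pipeline(ctx, "example", 4, createJobs, worker, processResults)
}
```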
+var statSnapshotCmd = &cli.Command{
+	Name:  "stat-snapshot",
+	Usage: "calculates the space usage of a snapshot taken from the given tipset",
+	Description: `Walk the chain back to lightweight snapshot size and break down space usage into high level
+categories: headers, messages, receipts, latest state root, and churn from earlier state roots.
+State root and churn space is further broken down by actor type and immediate top level fields
+`,
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "tipset",
+			Usage: "specify tipset to call method on (pass comma separated array of cids)",
+		},
+		&cli.IntFlag{
+			Name:  "workers",
+			Usage: "number of workers to use when processing",
+			Value: 10,
+		},
+		&cli.IntFlag{
+			Name:  "dag-cache-size",
+			Usage: "cache size per worker (setting to 0 disables)",
+			Value: 8092,
+		},
+		&cli.BoolFlag{
+			Name:  "pretty",
+			Usage: "print formatted output instead of ldjson",
+			Value: false,
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		ctx := lcli.ReqContext(cctx)
+		h, err := loadChainStore(ctx, cctx.String("repo"))
+		if err != nil {
+			return err
+		}
+		defer h.closer()
+		tsr := &ChainStoreTipSetResolver{
+			Chain: h.cs,
+		}
+
+		ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
+		if err != nil {
+			return err
+		}
+
+		numWorkers := cctx.Int("workers")
+		dagCacheSize := cctx.Int("dag-cache-size")
+
+		cidCh := make(chan cidCall, numWorkers)
+		summary := make(map[string]api.ObjStat)
+		// snapshot root objects with no additional bytes or links
+		summary["/"] = api.ObjStat{Size: 0, Links: 0}
+		summary["/statetree"] = api.ObjStat{Size: 0, Links: 0}
+
+		combine := func(statsA, statsB api.ObjStat) api.ObjStat {
+			return api.ObjStat{
+				Size:  statsA.Size + statsB.Size,
+				Links: statsA.Links + statsB.Links,
+			}
+		}
+
+		// Threadsafe cid set lives across different pipelines so not part of error group
+		go func() {
+			seen := cid.NewSet()
+			for {
+				select {
+				case call := <-cidCh:
+					call.resp <- seen.Visit(call.c)
+				case <-ctx.Done():
+					log.Infof("shutting down cid set goroutine: %s", ctx.Err())
+					return
+				}
+			}
+		}()
+		visit := func(c cid.Cid) bool {
+			ch := make(chan bool)
+			cidCh <- cidCall{c: c, resp: ch}
+			out := <-ch
+			return out
+		}
+		// Stage 1 walk all actors in latest state root
+		createJobsStage1 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+			st, err := h.sm.StateTree(ts.ParentState())
+			if err != nil {
+				return err
+			}
+
+			return st.ForEach(func(_ address.Address, act *types.Actor) error {
+				actType := builtin.ActorNameByCode(act.Code)
+				actType = path.Base(actType) // strip away fil/<nv>
+				if actType == "<unknown>" {
+					actType = act.Code.String()
+				}
+				jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/latest/%s", actType)}
+
+				return nil
+			})
+		}
+
+		worker := func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error {
+			var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+			if dagCacheSize != 0 {
+				var err error
+				dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+				if err != nil {
+					return err
+				}
+			}
+
+			for job := range jobCh {
+				stats, err := collectSnapshotJobStats(ctx, job, dag, visit)
+				if err != nil {
+					return err
+				}
+				for _, stat := range stats {
+					select {
+					case resultCh <- stat:
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+				}
+			}
+			return nil
+		}
+
+		processResults := func(ctx context.Context, resultCh chan result) error {
+			for result := range resultCh {
+				if stat, ok := summary[result.key]; ok {
+					summary[result.key] = combine(stat, result.stats)
+				} else {
+					summary[result.key] = result.stats
+				}
+			}
+			return nil
+		}
+
+		if err := pipeline(ctx, "Latest State Actors", numWorkers, createJobsStage1, worker, processResults); err != nil {
+			return err
+		}
+
+		// Stage 2: walk the top of the latest state root
+		createJobsStage2 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+			jobCh <- job{c: ts.ParentState(), key: "/statetree/latest"}
+			return nil
+		}
+
+		if err := pipeline(ctx, "Latest State HAMT", numWorkers, createJobsStage2, worker, processResults); err != nil {
+			return err
+		}
+
+		// Stage 3 walk the rest of the chain: headers, messages, churn
+		// ordering:
+		// for each header send jobs for messages, receipts, state tree churn
+		// don't walk header directly as it would just walk everything including parent tipsets
+
+		churnStateRoots := cid.NewSet()
+		createJobsStage3 := func(ctx context.Context, jobCh chan job, resultCh chan result) error {
+			// walk chain
+			var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+			if dagCacheSize != 0 {
+				var err error
+				dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+				if err != nil {
+					return err
+				}
+			}
+
+			blocksToWalk := ts.Cids()
+			startHeight := ts.Height()
+			snapshotStateLimit := abi.ChainEpoch(2000)
+
+			churnActorCache := cid.NewSet()
+			blocksTracked := cid.NewSet()
+			for len(blocksToWalk) > 0 {
+				blkCid := blocksToWalk[0]
+				blocksToWalk = blocksToWalk[1:]
+				nd, err := dag.Get(ctx, blkCid)
+				if err != nil {
+					return xerrors.Errorf("getting block: %w", err)
+				}
+
+				var b types.BlockHeader
+				if err := b.UnmarshalCBOR(bytes.NewBuffer(nd.RawData())); err != nil {
+					return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blkCid, err)
+				}
+
+				// header directly to result channel
+				resultCh <- result{key: "/headers", stats: api.ObjStat{Size: uint64(len(nd.RawData())), Links: uint64(len(nd.Links()))}}
+				// message job
+				if b.Height > startHeight-snapshotStateLimit {
+					jobCh <- job{key: "/messages", c: b.Messages}
+				}
+
+				// state churn job
+				if b.Height > startHeight-snapshotStateLimit {
+					if churnStateRoots.Visit(b.ParentStateRoot) {
+						st, err := h.sm.StateTree(b.ParentStateRoot)
+						if err != nil {
+							return err
+						}
+
+						err = st.ForEach(func(_ address.Address, act *types.Actor) error {
+							if churnActorCache.Visit(act.Head) {
+								actType := builtin.ActorNameByCode(act.Code)
+								actType = path.Base(actType) // strip away fil/<nv>
+								if actType == "<unknown>" {
+									actType = act.Code.String()
+								}
+								jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/churn/%s", actType)}
+							}
+
+							return nil
+						})
+						if err != nil {
+							return err
+						}
+					}
+				}
+				for _, blkCid := range b.Parents {
+					if blocksTracked.Visit(blkCid) && b.Height != 0 {
+						blocksToWalk = append(blocksToWalk, blkCid)
+					}
+				}
+			}
+			return nil
+		}
+
+		if err := pipeline(ctx, "Churn, Headers, Messages", numWorkers, createJobsStage3, worker, processResults); err != nil {
+			return err
+		}
+
+		// step 1 clean things up and get a nice abstraction to reuse
+		// Stage 4 walk all actor HAMTs for churn
+
+		createJobsStage4 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+			return churnStateRoots.ForEach(func(c cid.Cid) error {
+				jobCh <- job{c: c, key: "/statetree/churn"}
+				return nil
+			})
+		}
+
+		if err := pipeline(ctx, "Churn HAMT", numWorkers, createJobsStage4, worker, processResults); err != nil {
+			return err
+		}
+
+		if cctx.Bool("pretty") {
+			DumpSnapshotStats(summary)
+		} else {
+			if err := DumpJSON(summary); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	},
+}
+
 var statActorCmd = &cli.Command{
 	Name:  "stat-actor",
 	Usage: "calculates the size of actors and their immediate structures",
@@ -265,57 +629,14 @@ to reduce the number of decode operations performed by caching the decoded objec
 				addrs = append(addrs, addr)
 			}
 		}
-
-		r, err := repo.NewFS(cctx.String("repo"))
-		if err != nil {
-			return xerrors.Errorf("opening fs repo: %w", err)
-		}
-
-		exists, err := r.Exists()
-		if err != nil {
-			return err
-		}
-		if !exists {
-			return xerrors.Errorf("lotus repo doesn't exist")
-		}
-
-		lr, err := r.Lock(repo.FullNode)
-		if err != nil {
-			return err
-		}
-		defer lr.Close() //nolint:errcheck
-
-		bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
-		if err != nil {
-			return fmt.Errorf("failed to open blockstore: %w", err)
-		}
-
-		defer func() {
-			if c, ok := bs.(io.Closer); ok {
-				if err := c.Close(); err != nil {
-					log.Warnf("failed to close blockstore: %s", err)
-				}
-			}
-		}()
-
-		mds, err := lr.Datastore(context.Background(), "/metadata")
-		if err != nil {
-			return err
-		}
-
-		cs := store.NewChainStore(bs, bs, mds, nil, nil)
-		if err := cs.Load(ctx); err != nil {
-			return nil
-		}
-
-		tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
-		sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
+		h, err := loadChainStore(ctx, cctx.String("repo"))
 		if err != nil {
 			return err
 		}
+		defer h.closer()

 		tsr := &ChainStoreTipSetResolver{
-			Chain: cs,
+			Chain: h.cs,
 		}

 		ts, err := lcli.LoadTipSet(ctx, cctx, tsr)

@@ -327,7 +648,7 @@ to reduce the number of decode operations performed by caching the decoded objec

 		if len(addrs) == 0 && cctx.Bool("all") {
 			var err error
-			addrs, err = sm.ListAllActors(ctx, ts)
+			addrs, err = h.sm.ListAllActors(ctx, ts)
 			if err != nil {
 				return err
 			}

@@ -354,15 +675,15 @@ to reduce the number of decode operations performed by caching the decoded objec
 				return nil
 			}

-			actor, err := sm.LoadActor(ctx, addr, ts)
+			actor, err := h.sm.LoadActor(ctx, addr, ts)
 			if err != nil {
 				return err
 			}

-			var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+			var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
 			if dagCacheSize != 0 {
 				var err error
-				dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))), dagCacheSize)
+				dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
 				if err != nil {
 					return err
 				}

@@ -427,6 +748,93 @@ to reduce the number of decode operations performed by caching the decoded objec
 	},
 }

+func collectSnapshotJobStats(ctx context.Context, in job, dag format.NodeGetter, visit func(c cid.Cid) bool) ([]result, error) {
+	// "state" and "churn" attempt further breakdown by actor type
+	if !(path.Dir(in.key) == "/statetree/latest") && !(path.Dir(in.key) == "/statetree/churn") {
+		dsc := &dagStatCollector{
+			ds:   dag,
+			walk: carWalkFunc,
+		}
+
+		if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+			return nil, err
+		}
+		return []result{{key: in.key, stats: dsc.stats}}, nil
+	}
+
+	// in.c is an actor head cid, try to unmarshal and create sub keys for different regions of state
+	nd, err := dag.Get(ctx, in.c)
+	if err != nil {
+		return nil, err
+	}
+	subjobs := make([]job, 0)
+	results := make([]result, 0)
+
+	// reconstruct actor for state parsing from key
+	av, err := gstactors.VersionForNetwork(network.Version20)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get actors version for network: %w", err)
+	}
+	code, ok := actors.GetActorCodeID(av, path.Base(in.key))
+	if !ok { // try parsing key directly
+		code, err = cid.Parse(path.Base(in.key))
+		if err != nil {
+			log.Debugf("failed to parse actor string: %s", path.Base(in.key))
+		}
+	}
+
+	actor := types.ActorV5{Head: in.c, Code: code}
+	oif, err := vm.DumpActorState(consensus.NewTipSetExecutor(filcns.RewardFunc).NewActorRegistry(), &actor, nd.RawData())
+	if err != nil {
+		oif = nil
+	}
+	// Account actors return nil from DumpActorState as they have no state
+	if oif != nil {
+		v := reflect.Indirect(reflect.ValueOf(oif))
+		for i := 0; i < v.NumField(); i++ {
+			varName := v.Type().Field(i).Name
+			varType := v.Type().Field(i).Type
+			varValue := v.Field(i).Interface()
+
+			if varType == reflect.TypeOf(cid.Cid{}) {
+				subjobs = append(subjobs, job{
+					key: fmt.Sprintf("%s/%s", in.key, varName),
+					c:   varValue.(cid.Cid),
+				})
+			}
+		}
+	}
+
+	// Walk subfields
+	for _, job := range subjobs {
+		dsc := &dagStatCollector{
+			ds:   dag,
+			walk: carWalkFunc,
+		}
+
+		if err := merkledag.Walk(ctx, dsc.walkLinks, job.c, visit, merkledag.Concurrent()); err != nil {
+			return nil, err
+		}
+		var res result
+		res.key = job.key
+		res.stats = dsc.stats
+
+		results = append(results, res)
+	}
+
+	// now walk the top level object of actor state
+	dsc := &dagStatCollector{
+		ds:   dag,
+		walk: carWalkFunc,
+	}
+
+	if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+		return nil, err
+	}
+	results = append(results, result{key: in.key, stats: dsc.stats})
+	return results, nil
+}
+
 func collectStats(ctx context.Context, addr address.Address, actor *types.Actor, dag format.NodeGetter) (actorStats, error) {
 	log.Infow("actor", "addr", addr, "code", actor.Code, "name", builtin.ActorNameByCode(actor.Code))

@@ -532,3 +940,19 @@ func DumpStats(actStats actorStats) {

 	fmt.Println("--------------------------------------------------------------------------")
 }
+
+func DumpSnapshotStats(stats map[string]api.ObjStat) {
+	// sort keys so we get subkey locality
+	keys := make([]string, 0, len(stats))
+	for k := range stats {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	fmt.Printf("%-*s%-*s%-*s\n", 32, "Path", 24, "Size", 24, "\"Blocks\"")
+	for _, k := range keys {
+		stat := stats[k]
+		sizeStr := units.BytesSize(float64(stat.Size))
+		fmt.Printf("%-*s%-*s%-*s%-*d\n", 32, k, 10, sizeStr, 14, fmt.Sprintf("(%d)", stat.Size), 24, stat.Links)
+	}
+}
@@ -197,7 +197,7 @@ var staterootStatCmd = &cli.Command{
 			return err
 		}

-		fmt.Printf("%s\t%s\t%d\n", inf.Addr, string(cmh.Digest), inf.Stat.Size)
+		fmt.Printf("%s\t%x\t%d\n", inf.Addr, cmh.Digest, inf.Stat.Size)
 	}
 	return nil
 },
@@ -8,12 +8,14 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"reflect"
 	"strings"
 	"time"

 	"github.com/google/uuid"
 	"github.com/ipfs/go-datastore/namespace"
 	logging "github.com/ipfs/go-log/v2"
+	"github.com/multiformats/go-multiaddr"
 	manet "github.com/multiformats/go-multiaddr/net"
 	"github.com/urfave/cli/v2"
 	"go.opencensus.io/stats/view"

@@ -320,6 +322,29 @@ var runCmd = &cli.Command{
 			}
 		}

+		// Check DC-environment variable
+		sectorSizes := []string{"2KiB", "8MiB", "512MiB", "32GiB", "64GiB"}
+		resourcesType := reflect.TypeOf(storiface.Resources{})
+
+		for _, sectorSize := range sectorSizes {
+			for i := 0; i < resourcesType.NumField(); i++ {
+				field := resourcesType.Field(i)
+				envName := field.Tag.Get("envname")
+				if envName != "" {
+					// Check if DC_[SectorSize]_[ResourceRestriction] is set
+					envVar, ok := os.LookupEnv("DC_" + sectorSize + "_" + envName)
+					if ok {
+						// If it is set, convert it to DC_[ResourceRestriction]
+						err := os.Setenv("DC_"+envName, envVar)
+						if err != nil {
+							log.Fatalf("Error setting environment variable: %v", err)
+						}
+						log.Warnf("Converted DC_%s_%s to DC_%s, because DC is a sector-size independent job", sectorSize, envName, envName)
+					}
+				}
+			}
+		}
+
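The net effect of that loop, in miniature (the `MAX_CONCURRENT` field name here is illustrative; the real names come from the `envname` struct tags on `storiface.Resources`):

```go
// Illustrative only: the conversion loop rewrites a sector-size-qualified
// variable into its size-independent form, because DataCid (DC) work is
// sector-size independent.
_ = os.Setenv("DC_32GiB_MAX_CONCURRENT", "2") // what an operator might set
// ... runCmd's conversion loop runs here ...
fmt.Println(os.Getenv("DC_MAX_CONCURRENT")) // "2" after the loop has run
```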
 		// Connect to storage-miner
 		ctx := lcli.ReqContext(cctx)

@@ -530,9 +555,14 @@ var runCmd = &cli.Command{

 		log.Info("Opening local storage; connecting to master")
 		const unspecifiedAddress = "0.0.0.0"

 		address := cctx.String("listen")
-		addressSlice := strings.Split(address, ":")
-		if ip := net.ParseIP(addressSlice[0]); ip != nil {
+		host, port, err := net.SplitHostPort(address)
+		if err != nil {
+			return err
+		}
+
+		if ip := net.ParseIP(host); ip != nil {
 			if ip.String() == unspecifiedAddress {
 				timeout, err := time.ParseDuration(cctx.String("timeout"))
 				if err != nil {

@@ -542,11 +572,21 @@ var runCmd = &cli.Command{
 				if err != nil {
 					return err
 				}
-				address = rip + ":" + addressSlice[1]
+				host = rip
 			}
 		}

-		localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + address + "/remote"})
+		var newAddress string
+
+		// Check if the IP address is IPv6
+		ip := net.ParseIP(host)
+		if ip.To4() == nil && ip.To16() != nil {
+			newAddress = "[" + host + "]:" + port
+		} else {
+			newAddress = host + ":" + port
+		}
+
+		localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + newAddress + "/remote"})
 		if err != nil {
 			return err
 		}
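One observation on the bracketing branch just added: the standard library's `net.JoinHostPort` already brackets IPv6 hosts, so the `To4()`/`To16()` check could in principle collapse to a single call. A minimal sketch:

```go
package example

import (
	"fmt"
	"net"
)

func joinDemo() {
	// SplitHostPort strips the brackets from an IPv6 literal...
	host, port, _ := net.SplitHostPort("[::1]:3456") // host == "::1"
	// ...and JoinHostPort re-adds them whenever the host contains a colon,
	// so no manual IPv4/IPv6 branching is required.
	fmt.Println(net.JoinHostPort(host, port))          // "[::1]:3456"
	fmt.Println(net.JoinHostPort("127.0.0.1", "3456")) // "127.0.0.1:3456"
}
```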
@@ -587,7 +627,7 @@ var runCmd = &cli.Command{
 			Storage: lr,
 		}

-		log.Info("Setting up control endpoint at " + address)
+		log.Info("Setting up control endpoint at " + newAddress)

 		timeout, err := time.ParseDuration(cctx.String("http-server-timeout"))
 		if err != nil {

@@ -612,13 +652,13 @@ var runCmd = &cli.Command{
 			log.Warn("Graceful shutdown successful")
 		}()

-		nl, err := net.Listen("tcp", address)
+		nl, err := net.Listen("tcp", newAddress)
 		if err != nil {
 			return err
 		}

 		{
-			a, err := net.ResolveTCPAddr("tcp", address)
+			a, err := net.ResolveTCPAddr("tcp", newAddress)
 			if err != nil {
 				return xerrors.Errorf("parsing address: %w", err)
 			}

@@ -699,7 +739,7 @@ var runCmd = &cli.Command{

 		select {
 		case <-readyCh:
-			if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil {
+			if err := nodeApi.WorkerConnect(ctx, "http://"+newAddress+"/rpc/v0"); err != nil {
 				log.Errorf("Registering worker failed: %+v", err)
 				cancel()
 				return

@@ -740,21 +780,46 @@ func extractRoutableIP(timeout time.Duration) (string, error) {
 	deprecatedMinerMultiAddrKey := "STORAGE_API_INFO"
 	env, ok := os.LookupEnv(minerMultiAddrKey)
 	if !ok {
-		// TODO remove after deprecation period
 		_, ok = os.LookupEnv(deprecatedMinerMultiAddrKey)
 		if ok {
 			log.Warnf("Using a deprecated env(%s) value, please use env(%s) instead.", deprecatedMinerMultiAddrKey, minerMultiAddrKey)
 		}
 		return "", xerrors.New("MINER_API_INFO environment variable required to extract IP")
 	}
-	minerAddr := strings.Split(env, "/")
-	conn, err := net.DialTimeout("tcp", minerAddr[2]+":"+minerAddr[4], timeout)
+
+	// Splitting the env to separate the JWT from the multiaddress
+	splitEnv := strings.SplitN(env, ":", 2)
+	if len(splitEnv) < 2 {
+		return "", xerrors.Errorf("invalid MINER_API_INFO format")
+	}
+	// Only take the multiaddress part
+	maddrStr := splitEnv[1]
+
+	maddr, err := multiaddr.NewMultiaddr(maddrStr)
 	if err != nil {
 		return "", err
 	}
-	defer conn.Close() //nolint:errcheck
+
+	minerIP, _ := maddr.ValueForProtocol(multiaddr.P_IP6)
+	if minerIP == "" {
+		minerIP, _ = maddr.ValueForProtocol(multiaddr.P_IP4)
+	}
+	minerPort, _ := maddr.ValueForProtocol(multiaddr.P_TCP)
+
+	// Format the address appropriately
+	addressToDial := net.JoinHostPort(minerIP, minerPort)
+
+	conn, err := net.DialTimeout("tcp", addressToDial, timeout)
+	if err != nil {
+		return "", err
+	}
+
+	defer func() {
+		if cerr := conn.Close(); cerr != nil {
+			log.Errorf("Error closing connection: %v", cerr)
+		}
+	}()

 	localAddr := conn.LocalAddr().(*net.TCPAddr)
-	return strings.Split(localAddr.IP.String(), ":")[0], nil
+	return localAddr.IP.String(), nil
 }
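For reference, a standalone sketch of the multiaddr handling `extractRoutableIP` now performs (the `<token>:<multiaddr>` layout of `MINER_API_INFO` is as described in the diff; the helper name and example value are hypothetical):

```go
package example

import (
	"fmt"
	"net"
	"strings"

	"github.com/multiformats/go-multiaddr"
)

// minerDialAddr extracts "host:port" from a MINER_API_INFO-style value.
func minerDialAddr(info string) (string, error) {
	parts := strings.SplitN(info, ":", 2) // "<jwt>:<multiaddr>"
	if len(parts) < 2 {
		return "", fmt.Errorf("invalid format")
	}
	maddr, err := multiaddr.NewMultiaddr(parts[1])
	if err != nil {
		return "", err
	}
	ip, _ := maddr.ValueForProtocol(multiaddr.P_IP6) // prefer IPv6 if present
	if ip == "" {
		ip, _ = maddr.ValueForProtocol(multiaddr.P_IP4)
	}
	port, _ := maddr.ValueForProtocol(multiaddr.P_TCP)
	// JoinHostPort brackets IPv6 hosts automatically.
	return net.JoinHostPort(ip, port), nil
}

// e.g. minerDialAddr("<jwt>:/ip4/127.0.0.1/tcp/2345/http") == "127.0.0.1:2345"
```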
@@ -12,6 +12,7 @@ import (
 	"io"
 	"os"
 	"path"
+	"path/filepath"
 	"runtime/pprof"
 	"strings"

@@ -32,6 +33,7 @@ import (

 	lapi "github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/beacon/drand"
 	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/index"

@@ -119,6 +121,10 @@ var DaemonCmd = &cli.Command{
 			Name:  "import-snapshot",
 			Usage: "import chain state from a given chain export file or url",
 		},
+		&cli.BoolFlag{
+			Name:  "remove-existing-chain",
+			Usage: "remove existing chain and splitstore data on a snapshot-import",
+		},
 		&cli.BoolFlag{
 			Name:  "halt-after-import",
 			Usage: "halt the process after importing chain from file",

@@ -263,6 +269,26 @@ var DaemonCmd = &cli.Command{
 			}
 		}

+		if cctx.Bool("remove-existing-chain") {
+			lr, err := repo.NewFS(cctx.String("repo"))
+			if err != nil {
+				return xerrors.Errorf("error opening fs repo: %w", err)
+			}
+
+			exists, err := lr.Exists()
+			if err != nil {
+				return err
+			}
+			if !exists {
+				return xerrors.Errorf("lotus repo doesn't exist")
+			}
+
+			err = removeExistingChain(cctx, lr)
+			if err != nil {
+				return err
+			}
+		}
+
 		chainfile := cctx.String("import-chain")
 		snapshot := cctx.String("import-snapshot")
 		if chainfile != "" || snapshot != "" {

@@ -380,7 +406,6 @@ var DaemonCmd = &cli.Command{
 		if err != nil {
 			return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
 		}
-
 		// Monitor for shutdown.
 		finishCh := node.MonitorShutdown(shutdownChan,
 			node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},

@@ -536,13 +561,17 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
 		return err
 	}

-	// TODO: We need to supply the actual beacon after v14
-	stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
-	if err != nil {
-		return err
-	}
-
 	if !snapshot {
+		shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
+		if err != nil {
+			return xerrors.Errorf("failed to construct beacon schedule: %w", err)
+		}
+
+		stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
+		if err != nil {
+			return err
+		}
+
 		log.Infof("validating imported chain...")
 		if err := stm.ValidateChain(ctx, ts); err != nil {
 			return xerrors.Errorf("chain validation failed: %w", err)

@@ -574,3 +603,59 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)

 	return nil
 }
+
+func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
+	lockedRepo, err := lr.Lock(repo.FullNode)
+	if err != nil {
+		return xerrors.Errorf("error locking repo: %w", err)
+	}
+	// Ensure that lockedRepo is closed when this function exits
+	defer func() {
+		if closeErr := lockedRepo.Close(); closeErr != nil {
+			log.Errorf("Error closing the lockedRepo: %v", closeErr)
+		}
+	}()
+
+	cfg, err := lockedRepo.Config()
+	if err != nil {
+		return xerrors.Errorf("error getting config: %w", err)
+	}
+
+	fullNodeConfig, ok := cfg.(*config.FullNode)
+	if !ok {
+		return xerrors.Errorf("wrong config type: %T", cfg)
+	}
+
+	if fullNodeConfig.Chainstore.EnableSplitstore {
+		log.Info("removing splitstore directory...")
+		err = deleteSplitstoreDir(lockedRepo)
+		if err != nil {
+			return xerrors.Errorf("error removing splitstore directory: %w", err)
+		}
+	}
+
+	// Get the base repo path
+	repoPath := lockedRepo.Path()
+
+	// Construct the path to the chain directory
+	chainPath := filepath.Join(repoPath, "datastore", "chain")
+
+	log.Info("removing chain directory:", chainPath)
+
+	err = os.RemoveAll(chainPath)
+	if err != nil {
+		return xerrors.Errorf("error removing chain directory: %w", err)
+	}
+
+	log.Info("chain and splitstore data have been removed")
+	return nil
+}
+
+func deleteSplitstoreDir(lr repo.LockedRepo) error {
+	path, err := lr.SplitstorePath()
+	if err != nil {
+		return xerrors.Errorf("error getting splitstore path: %w", err)
+	}
+
+	return os.RemoveAll(path)
+}
@@ -5467,7 +5467,8 @@ Response:
     "UpgradeLightningHeight": 10101,
     "UpgradeThunderHeight": 10101,
     "UpgradeWatermelonHeight": 10101
-  }
+  },
+  "Eip155ChainID": 123
 }
 ```

@@ -104,6 +104,8 @@
 * [EthSendRawTransaction](#EthSendRawTransaction)
 * [EthSubscribe](#EthSubscribe)
 * [EthSyncing](#EthSyncing)
+* [EthTraceBlock](#EthTraceBlock)
+* [EthTraceReplayBlockTransactions](#EthTraceReplayBlockTransactions)
 * [EthUninstallFilter](#EthUninstallFilter)
 * [EthUnsubscribe](#EthUnsubscribe)
 * [Filecoin](#Filecoin)

@@ -3083,6 +3085,99 @@ Inputs: `null`

 Response: `false`

+### EthTraceBlock
+TraceAPI related methods
+
+Returns traces created at given block
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "string value"
+]
+```
+
+Response:
+```json
+[
+  {
+    "action": {
+      "callType": "string value",
+      "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+      "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+      "gas": "0x5",
+      "input": "0x07",
+      "value": "0x0"
+    },
+    "result": {
+      "gasUsed": "0x5",
+      "output": "0x07"
+    },
+    "subtraces": 123,
+    "traceAddress": [
+      123
+    ],
+    "Type": "string value",
+    "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+    "blockNumber": 9,
+    "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+    "transactionPosition": 123
+  }
+]
+```
+
+### EthTraceReplayBlockTransactions
+Replays all transactions in a block returning the requested traces for each transaction
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "string value",
+  [
+    "string value"
+  ]
+]
+```
+
+Response:
+```json
+[
+  {
+    "output": "0x07",
+    "stateDiff": "string value",
+    "trace": [
+      {
+        "action": {
+          "callType": "string value",
+          "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+          "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+          "gas": "0x5",
+          "input": "0x07",
+          "value": "0x0"
+        },
+        "result": {
+          "gasUsed": "0x5",
+          "output": "0x07"
+        },
+        "subtraces": 123,
+        "traceAddress": [
+          123
+        ],
+        "Type": "string value"
+      }
+    ],
+    "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+    "vmTrace": "string value"
+  }
+]
+```
+
 ### EthUninstallFilter
 Uninstalls a filter with given id.

@@ -6981,7 +7076,8 @@ Response:
     "UpgradeLightningHeight": 10101,
     "UpgradeThunderHeight": 10101,
     "UpgradeWatermelonHeight": 10101
-  }
+  },
+  "Eip155ChainID": 123
 }
 ```
@@ -7,7 +7,11 @@ USAGE:
    lotus-miner [global options] command [command options] [arguments...]

 VERSION:
+<<<<<<< HEAD
    1.24.0
+=======
+   1.25.0
+>>>>>>> release/v1.25.0

 COMMANDS:
    init      Initialize a lotus miner repo

@@ -7,7 +7,11 @@ USAGE:
    lotus-worker [global options] command [command options] [arguments...]

 VERSION:
+<<<<<<< HEAD
    1.24.0
+=======
+   1.25.0
+>>>>>>> release/v1.25.0

 COMMANDS:
    run       Start lotus worker

@@ -7,7 +7,11 @@ USAGE:
    lotus [global options] command [command options] [arguments...]

 VERSION:
+<<<<<<< HEAD
    1.24.0
+=======
+   1.25.0
+>>>>>>> release/v1.25.0

 COMMANDS:
    daemon   Start a lotus daemon process

@@ -65,6 +69,7 @@ OPTIONS:
    --bootstrap                (default: true)
    --import-chain value       on first run, load chain from given file or url and validate
    --import-snapshot value    import chain state from a given chain export file or url
+   --remove-existing-chain    remove existing chain and splitstore data on a snapshot-import (default: false)
    --halt-after-import        halt the process after importing chain from file (default: false)
    --lite                     start lotus in lite mode (default: false)
    --pprof value              specify name of file for writing cpu profile to
@@ -2,28 +2,25 @@

 # Lotus X.Y.Z Release

-## What will be in the release
-
+[//]: # (Open this issue as [WIP] Lotus vX.Y.Z)
+[//]: # (Apply the `tpm` label to it, and pin the issue on GitHub)

 ## 🚢 Estimated shipping date

 <Date this release will ship on if everything goes to plan (week beginning...)>

-## 🔦 Highlights
-
-< See Changelog>
-
 ## ✅ Release Checklist

-**Note for whomever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
+**Note for whoever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.

 First steps:
+- [ ] FFI: Fork a new branch (`release/lotus-vX.Y.Z`) from the filecoin-ffi `master` branch
+- [ ] FFI: Tag the head of `release/lotus-vX.Y.Z` as `vX.Y.Z-pre1`
+- [ ] Open and land a PR in lotus `master` that updates the FFI dependency to `vX.Y.Z-pre1` as cut in the previous step
 - [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
 - [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release). Run make gen and make docsgen-cli before committing changes

-Prepping an RC:
+**Prepping an RC**:

 - [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch)
 - [ ] run `make gen && make docsgen-cli`

@@ -32,7 +29,7 @@ Prepping an RC:
 - [ ] tag commit with `vX.Y.Z-rcN`
 - [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true)

-Testing an RC:
+**Testing an RC**:

 - [ ] **Stage 0 - Automated Testing**
   - Automated Testing

@@ -69,7 +66,7 @@ Testing an RC:
   - [ ] Update the [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) to the state that can be used as release note.
   - [ ] Invite the wider community through (link to the release issue)

-- [ ] **Stage 4 - Stable Release**
+**Stable Release**
   - [ ] Final preparation
     - [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
     - [ ] Verify that codegen is up to date (`make gen && make docsgen-cli`)

@@ -79,7 +76,7 @@ Testing an RC:
   - [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=false&target=releases).


-- [ ] **Post-Release**
+**Post-Release**
   - [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so!
   - [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration.
   - [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release.
2
extern/filecoin-ffi
vendored
2
extern/filecoin-ffi
vendored
@ -1 +1 @@
|
|||||||
Subproject commit fa64b5537320dbdcf8456bb6ca9e82adb07b7747
|
Subproject commit 441fa8e61189dc32c2960c1f8d8ba56269f20366
|
@@ -14,6 +14,7 @@ import (
     "github.com/filecoin-project/go-bitfield"
     "github.com/filecoin-project/go-jsonrpc"
     "github.com/filecoin-project/go-state-types/abi"
+    verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
     "github.com/filecoin-project/go-state-types/dline"
     "github.com/filecoin-project/go-state-types/network"

@@ -77,6 +78,11 @@ type TargetAPI interface {
     StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
     StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
     StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+    StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+    StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+    StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+    StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+    StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
     StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
     StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
     StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
@@ -138,6 +144,8 @@ type TargetAPI interface {
     EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
     EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
     Web3ClientVersion(ctx context.Context) (string, error)
+    EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+    EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
 }

 var _ TargetAPI = *new(api.FullNode) // gateway depends on latest
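The `var _ TargetAPI = *new(api.FullNode)` line is a compile-time assertion: the build fails the moment the full-node API stops satisfying the gateway's `TargetAPI` interface, which is exactly what guards the five new `StateGet*` and two `EthTrace*` methods above. A minimal self-contained sketch of the idiom, with hypothetical names:

```go
package main

// Greeter is the interface a gateway-style wrapper would depend on.
type Greeter interface {
	Greet() string
}

// Impl is the concrete type that must keep satisfying Greeter.
type Impl struct{}

func (Impl) Greet() string { return "hi" }

// Compile-time assertion: if Impl ever loses Greet() or its signature
// drifts, this declaration stops compiling, surfacing the break
// immediately instead of at runtime.
var _ Greeter = *new(Impl)

func main() {}
```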
@@ -16,18 +16,11 @@ import (
     "github.com/filecoin-project/go-state-types/big"

     "github.com/filecoin-project/lotus/api"
+    "github.com/filecoin-project/lotus/chain/events/filter"
     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/chain/types/ethtypes"
 )

-func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
-    if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
-        return "", err
-    }
-
-    return gw.target.Web3ClientVersion(ctx)
-}
-
 func (gw *Node) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) {
     // gateway provides public API, so it can't hold user accounts
     return []ethtypes.EthAddress{}, nil
@@ -427,7 +420,7 @@ func (gw *Node) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID
     ft.lk.Unlock()

     if !ok {
-        return nil, nil
+        return nil, filter.ErrFilterNotFound
     }

     return gw.target.EthGetFilterChanges(ctx, id)
@@ -581,6 +574,38 @@ func (gw *Node) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionI
     return ok, nil
 }

+func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
+    if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
+        return "", err
+    }
+
+    return gw.target.Web3ClientVersion(ctx)
+}
+
+func (gw *Node) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+
+    if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+        return nil, err
+    }
+
+    return gw.target.EthTraceBlock(ctx, blkNum)
+}
+
+func (gw *Node) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+
+    if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+        return nil, err
+    }
+
+    return gw.target.EthTraceReplayBlockTransactions(ctx, blkNum, traceTypes)
+}
+
 var EthMaxFiltersPerConn = 16 // todo make this configurable

 func addUserFilterLimited(ctx context.Context, cb func() (ethtypes.EthFilterID, error)) (ethtypes.EthFilterID, error) {
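Every gateway method above follows the same shape: spend rate-limit tokens, validate any chain-height argument, then delegate to the trusted full node. A minimal self-contained sketch of that guard-then-delegate pattern; the names (`limiter`, `validate`, `upstream`) are illustrative stand-ins, not the actual lotus fields:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// gateway wraps a trusted upstream and screens every call.
type gateway struct {
	limiter  func(ctx context.Context, tokens int) error // rate limiting
	validate func(ctx context.Context, arg string) error // argument checks
	upstream func(ctx context.Context, arg string) (string, error)
}

// call applies the guards in order, then delegates; any guard error
// short-circuits before the upstream is touched.
func (g *gateway) call(ctx context.Context, arg string) (string, error) {
	if err := g.limiter(ctx, 1); err != nil {
		return "", err
	}
	if err := g.validate(ctx, arg); err != nil {
		return "", err
	}
	return g.upstream(ctx, arg)
}

func main() {
	g := &gateway{
		limiter: func(context.Context, int) error { return nil },
		validate: func(_ context.Context, a string) error {
			if a == "" {
				return errors.New("bad arg")
			}
			return nil
		},
		upstream: func(_ context.Context, a string) (string, error) { return "ok:" + a, nil },
	}
	fmt.Println(g.call(context.Background(), "latest")) // ok:latest <nil>
}
```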
@@ -10,6 +10,7 @@ import (
     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/go-bitfield"
     "github.com/filecoin-project/go-state-types/abi"
+    verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
     "github.com/filecoin-project/go-state-types/crypto"
     "github.com/filecoin-project/go-state-types/dline"
     "github.com/filecoin-project/go-state-types/network"
@@ -579,3 +580,53 @@ func (gw *Node) WalletBalance(ctx context.Context, k address.Address) (types.Big
     }
     return gw.target.WalletBalance(ctx, k)
 }
+
+func (gw *Node) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+    if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+        return nil, err
+    }
+    return gw.target.StateGetAllocationForPendingDeal(ctx, dealId, tsk)
+}
+
+func (gw *Node) StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+    if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+        return nil, err
+    }
+    return gw.target.StateGetAllocation(ctx, clientAddr, allocationId, tsk)
+}
+
+func (gw *Node) StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+    if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+        return nil, err
+    }
+    return gw.target.StateGetAllocations(ctx, clientAddr, tsk)
+}
+
+func (gw *Node) StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+    if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+        return nil, err
+    }
+    return gw.target.StateGetClaim(ctx, providerAddr, claimId, tsk)
+}
+
+func (gw *Node) StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+    if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+        return nil, err
+    }
+    if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+        return nil, err
+    }
+    return gw.target.StateGetClaims(ctx, providerAddr, tsk)
+}
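The five new accessors repeat the same two guards verbatim. If this boilerplate keeps growing, a generic helper could collapse it; the sketch below is a hypothetical refactor that is not part of this PR, and it assumes the surrounding gateway package (`Node`, `stateRateLimitTokens`, `checkTipsetKey`):

```go
// stateGuarded is a hypothetical helper (not in the PR) that applies the
// shared rate-limit and tipset-key checks before running fn.
func stateGuarded[T any](ctx context.Context, gw *Node, tsk types.TipSetKey, fn func() (T, error)) (T, error) {
	var zero T
	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
		return zero, err
	}
	if err := gw.checkTipsetKey(ctx, tsk); err != nil {
		return zero, err
	}
	return fn()
}

// Each accessor would then collapse to a one-line delegation:
func (gw *Node) stateGetClaimsGuarded(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
	return stateGuarded(ctx, gw, tsk, func() (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
		return gw.target.StateGetClaims(ctx, providerAddr, tsk)
	})
}
```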
go.mod (7 changes)
@@ -59,7 +59,7 @@ require (
     github.com/filecoin-project/specs-actors/v6 v6.0.2
     github.com/filecoin-project/specs-actors/v7 v7.0.1
     github.com/filecoin-project/specs-actors/v8 v8.0.1
-    github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f
+    github.com/filecoin-project/test-vectors/schema v0.0.7
     github.com/gbrlsnchs/jwt/v3 v3.0.1
     github.com/gdamore/tcell/v2 v2.2.0
     github.com/go-openapi/spec v0.19.11
@@ -90,6 +90,7 @@ require (
     github.com/ipfs/go-fs-lock v0.0.7
     github.com/ipfs/go-graphsync v0.14.6
     github.com/ipfs/go-ipfs-blocksutil v0.0.1
+    github.com/ipfs/go-ipfs-exchange-offline v0.3.0
     github.com/ipfs/go-ipld-cbor v0.0.6
     github.com/ipfs/go-ipld-format v0.5.0
     github.com/ipfs/go-log/v2 v2.5.1
@@ -124,6 +125,7 @@ require (
     github.com/multiformats/go-multiaddr v0.11.0
     github.com/multiformats/go-multiaddr-dns v0.3.1
     github.com/multiformats/go-multibase v0.2.0
+    github.com/multiformats/go-multicodec v0.9.0
     github.com/multiformats/go-multihash v0.2.3
     github.com/multiformats/go-varint v0.0.7
     github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
@@ -279,7 +281,6 @@ require (
     github.com/mr-tron/base58 v1.2.0 // indirect
     github.com/multiformats/go-base36 v0.2.0 // indirect
     github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
-    github.com/multiformats/go-multicodec v0.9.0 // indirect
     github.com/multiformats/go-multistream v0.4.1 // indirect
     github.com/nikkolasg/hexjson v0.1.0 // indirect
     github.com/nkovacs/streamquote v1.0.0 // indirect
@@ -295,7 +296,7 @@ require (
     github.com/prometheus/procfs v0.9.0 // indirect
     github.com/prometheus/statsd_exporter v0.22.7 // indirect
     github.com/quic-go/qpack v0.4.0 // indirect
-    github.com/quic-go/qtls-go1-20 v0.3.2 // indirect
+    github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
     github.com/quic-go/quic-go v0.37.6 // indirect
     github.com/quic-go/webtransport-go v0.5.3 // indirect
     github.com/rivo/uniseg v0.1.0 // indirect
go.sum (9 changes)
@@ -370,8 +370,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt
 github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk=
 github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y=
 github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA=
-github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f h1:Ho3kK/WetJ7wco2VhR/pOZ9HD/WWL1BDEzYRTFQK8dw=
-github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
+github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A=
+github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
 github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -746,6 +746,7 @@ github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSO
 github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
 github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
 github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
 github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
 github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
 github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ=
@@ -1453,8 +1454,8 @@ github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Ez
 github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
 github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
 github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI=
-github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
+github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM=
+github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
 github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY=
 github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU=
 github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
@@ -28,12 +28,17 @@ func TestAPI(t *testing.T) {
     //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
     //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001

+    ts := apiSuite{}
+    t.Run("testMiningReal", ts.testMiningReal)
+    ts.opts = append(ts.opts, kit.ThroughRPC())
+    t.Run("testMiningReal", ts.testMiningReal)
+
     //stm: @CHAIN_STATE_MINER_INFO_001
     t.Run("direct", func(t *testing.T) {
-        runAPITest(t)
+        runAPITest(t, kit.MockProofs())
     })
     t.Run("rpc", func(t *testing.T) {
-        runAPITest(t, kit.ThroughRPC())
+        runAPITest(t, kit.MockProofs(), kit.ThroughRPC())
     })
 }

@@ -49,7 +54,6 @@ func runAPITest(t *testing.T, opts ...interface{}) {
     t.Run("id", ts.testID)
     t.Run("testConnectTwo", ts.testConnectTwo)
     t.Run("testMining", ts.testMining)
-    t.Run("testMiningReal", ts.testMiningReal)
     t.Run("testSlowNotify", ts.testSlowNotify)
     t.Run("testSearchMsg", ts.testSearchMsg)
     t.Run("testOutOfGasError", ts.testOutOfGasError)
@@ -22,7 +22,7 @@ func TestDealsWithSealingAndRPC(t *testing.T) {
     kit.QuietMiningLogs()

     client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
-    ens.InterconnectAll().BeginMining(250 * time.Millisecond)
+    ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond)
     dh := kit.NewDealHarness(t, client, miner, miner)

     t.Run("stdretrieval", func(t *testing.T) {
@@ -325,13 +325,23 @@ func TestGetBlockByNumber(t *testing.T) {

     afterNullHeight := hc[0].Val.Height()

+    nullHeight := afterNullHeight - 1
+    for nullHeight > 0 {
+        ts, err := client.ChainGetTipSetByHeight(ctx, nullHeight, types.EmptyTSK)
+        require.NoError(t, err)
+        if ts.Height() == nullHeight {
+            nullHeight--
+        } else {
+            break
+        }
+    }
+
     // Fail when trying to fetch a null round.
-    _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(afterNullHeight - 1)).Hex(), true)
+    _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(nullHeight)).Hex(), true)
     require.Error(t, err)

     // Fetch balance on a null round; should not fail and should return previous balance.
-    // Should be lower than original balance.
-    bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(afterNullHeight-1)))
+    bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(nullHeight)))
     require.NoError(t, err)
     require.NotEqual(t, big.Zero(), bal)
     require.Equal(t, types.FromFil(10).Int, bal.Int)
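The scan works because `ChainGetTipSetByHeight` resolves a null epoch to the nearest preceding non-null tipset, so a mismatch between the requested and returned heights marks a genuine null round; the old code simply assumed `afterNullHeight - 1` was null, which is not guaranteed. A standalone helper capturing that check might look like this sketch (assuming a `FullNode`-style API client):

```go
// findNullRound walks downward from 'from' and returns the first epoch
// that is a null round, or false if none exists above genesis. It relies
// on ChainGetTipSetByHeight resolving null epochs to the nearest earlier
// tipset, so a height mismatch signals a null round.
func findNullRound(ctx context.Context, client api.FullNode, from abi.ChainEpoch) (abi.ChainEpoch, bool, error) {
	for h := from; h > 0; h-- {
		ts, err := client.ChainGetTipSetByHeight(ctx, h, types.EmptyTSK)
		if err != nil {
			return 0, false, err
		}
		if ts.Height() != h {
			return h, true, nil // epoch h has no tipset: a null round
		}
	}
	return 0, false, nil
}
```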
@@ -87,6 +87,15 @@ func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market
 //
 // TODO: convert input parameters to struct, and add size as an input param.
 func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
+    deal, res, path = dh.StartRandomDeal(ctx, params)
+
+    fmt.Printf("WAIT DEAL SEALEDS START\n")
+    dh.WaitDealSealed(ctx, deal, false, false, nil)
+    fmt.Printf("WAIT DEAL SEALEDS END\n")
+    return deal, res, path
+}
+
+func (dh *DealHarness) StartRandomDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
     if params.UseCARFileForStorageDeal {
         res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200)
     } else {
@@ -107,11 +116,6 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
     dp.FastRetrieval = params.FastRet
     deal = dh.StartDeal(ctx, dp)

-    // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
-    time.Sleep(time.Second)
-    fmt.Printf("WAIT DEAL SEALEDS START\n")
-    dh.WaitDealSealed(ctx, deal, false, false, nil)
-    fmt.Printf("WAIT DEAL SEALEDS END\n")
     return deal, res, path
 }

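Splitting `MakeOnlineDeal` into `StartRandomDeal` plus the sealing wait lets a test publish a deal and then drive the sealing pipeline itself; `TestWorkerPledgeExpireCommit` later in this diff relies on exactly that. A hedged usage sketch, assuming the usual itests kit imports:

```go
// Sketch: a test that publishes a deal but observes sealing itself,
// instead of blocking inside MakeOnlineDeal until the deal seals.
// The ensemble options are illustrative; any working ensemble would do.
func TestStartDealWithoutWaiting(t *testing.T) {
	ctx := context.Background()
	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems())
	ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond)

	dh := kit.NewDealHarness(t, client, miner, miner)

	// Returns as soon as the deal is proposed and imported; the
	// sealing pipeline is now the test's to observe or manipulate.
	deal, _, _ := dh.StartRandomDeal(ctx, kit.MakeFullDealParams{Rseed: 7})
	_ = deal // e.g. poll miner.SectorsListNonGenesis until the sector shows up
}
```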
@@ -169,6 +169,8 @@ func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
         require.NoError(t, build.UseNetworkBundle("testing"))
     }

+    build.EquivocationDelaySecs = 0
+
     return n
 }

@@ -21,6 +21,7 @@ func QuietMiningLogs() {
     _ = logging.SetLogLevel("pubsub", "ERROR")
     _ = logging.SetLogLevel("gen", "ERROR")
     _ = logging.SetLogLevel("rpc", "ERROR")
+    _ = logging.SetLogLevel("consensus-common", "ERROR")
     _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
 }

@@ -197,7 +197,7 @@ func OwnerAddr(wk *key.Key) NodeOpt {
 // the node.
 func ConstructorOpts(extra ...node.Option) NodeOpt {
     return func(opts *nodeOpts) error {
-        opts.extraNodeOpts = extra
+        opts.extraNodeOpts = append(opts.extraNodeOpts, extra...)
         return nil
     }
 }
@@ -290,6 +290,13 @@ func SplitstoreMessges() NodeOpt {
     })
 }

+func SplitstoreDisable() NodeOpt {
+    return WithCfgOpt(func(cfg *config.FullNode) error {
+        cfg.Chainstore.EnableSplitstore = false
+        return nil
+    })
+}
+
 func WithEthRPC() NodeOpt {
     return WithCfgOpt(func(cfg *config.FullNode) error {
         cfg.Fevm.EnableEthRPC = true
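The `append` change makes `ConstructorOpts` composable: previously a second call silently overwrote the options installed by the first, now both are kept. A minimal self-contained sketch of the difference, using a stand-in option type:

```go
package main

import "fmt"

type option string

type nodeOpts struct{ extraNodeOpts []option }

// Old behavior: plain assignment clobbers anything set earlier.
func constructorOptsOld(opts *nodeOpts, extra ...option) {
	opts.extraNodeOpts = extra
}

// New behavior: options accumulate across calls.
func constructorOptsNew(opts *nodeOpts, extra ...option) {
	opts.extraNodeOpts = append(opts.extraNodeOpts, extra...)
}

func main() {
	var a, b nodeOpts
	constructorOptsOld(&a, "override-msgindex")
	constructorOptsOld(&a, "override-sealing") // first option silently lost
	constructorOptsNew(&b, "override-msgindex")
	constructorOptsNew(&b, "override-sealing") // both retained
	fmt.Println(a.extraNodeOpts) // [override-sealing]
	fmt.Println(b.extraNodeOpts) // [override-msgindex override-sealing]
}
```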
@@ -93,7 +93,7 @@ func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore
     // copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there
     ctx := context.Background()

-    full, _, ens := kit.EnsembleMinimal(t, kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))
+    full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))

     senderAddr, err := full.WalletDefaultAddress(ctx)
     require.NoError(t, err)
@@ -9,16 +9,20 @@ import (
     "testing"
     "time"

+    "github.com/ipfs/go-cid"
     "github.com/stretchr/testify/require"

     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/go-state-types/big"
+    "github.com/filecoin-project/go-state-types/builtin"
+    datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
     verifregst "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
     "github.com/filecoin-project/go-state-types/network"

     lapi "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/chain/actors"
+    "github.com/filecoin-project/lotus/chain/actors/builtin/datacap"
     "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/chain/wallet/key"
@@ -225,36 +229,8 @@ func TestRemoveDataCap(t *testing.T) {

     // make the 2 verifiers

-    makeVerifier := func(addr address.Address) error {
-        allowance := big.NewInt(100000000000)
-        params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
-        require.NoError(t, aerr)
-
-        msg := &types.Message{
-            From:   rootAddr,
-            To:     verifreg.Address,
-            Method: verifreg.Methods.AddVerifier,
-            Params: params,
-            Value:  big.Zero(),
-        }
-
-        sm, err := api.MpoolPushMessage(ctx, msg, nil)
-        require.NoError(t, err, "AddVerifier failed")
-
-        //stm: @CHAIN_STATE_WAIT_MSG_001
-        res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
-        require.NoError(t, err)
-        require.EqualValues(t, 0, res.Receipt.ExitCode)
-
-        verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
-        require.NoError(t, err)
-        require.Equal(t, allowance, *verifierAllowance)
-
-        return nil
-    }
-
-    require.NoError(t, makeVerifier(verifier1Addr))
-    require.NoError(t, makeVerifier(verifier2Addr))
+    makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+    makeVerifier(ctx, t, api, rootAddr, verifier2Addr)

     // assign datacap to a client
     datacap := big.NewInt(10000)
@@ -374,3 +350,156 @@ func TestRemoveDataCap(t *testing.T) {
     require.NoError(t, err)
     require.Nil(t, dcap, "expected datacap to be nil")
 }
+
+func TestVerifiedClientCanCreateAllocation(t *testing.T) {
+    blockTime := 100 * time.Millisecond
+
+    rootKey, err := key.GenerateKey(types.KTSecp256k1)
+    require.NoError(t, err)
+
+    verifier1Key, err := key.GenerateKey(types.KTSecp256k1)
+    require.NoError(t, err)
+
+    verifiedClientKey, err := key.GenerateKey(types.KTBLS)
+    require.NoError(t, err)
+
+    bal, err := types.ParseFIL("100fil")
+    require.NoError(t, err)
+
+    node, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
+        kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
+        kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())),
+        kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())),
+    )
+
+    ens.InterconnectAll().BeginMining(blockTime)
+
+    api := node.FullNode.(*impl.FullNodeAPI)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    // get VRH
+    vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
+    fmt.Println(vrh.String())
+    require.NoError(t, err)
+
+    // import the root key.
+    rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
+    require.NoError(t, err)
+
+    // import the verifiers' keys.
+    verifier1Addr, err := api.WalletImport(ctx, &verifier1Key.KeyInfo)
+    require.NoError(t, err)
+
+    // import the verified client's key.
+    verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
+    require.NoError(t, err)
+
+    // resolve all keys
+
+    // make the 2 verifiers
+    makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+
+    // assign datacap to a client
+    initialDatacap := big.NewInt(10000)
+
+    params, err := actors.SerializeParams(&verifregst.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap})
+    require.NoError(t, err)
+
+    msg := &types.Message{
+        From:   verifier1Addr,
+        To:     verifreg.Address,
+        Method: verifreg.Methods.AddVerifiedClient,
+        Params: params,
+        Value:  big.Zero(),
+    }
+
+    sm, err := api.MpoolPushMessage(ctx, msg, nil)
+    require.NoError(t, err)
+
+    res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+    require.NoError(t, err)
+    require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+    // check datacap balance
+    dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+    require.NoError(t, err)
+    require.Equal(t, *dcap, initialDatacap)
+
+    minerId, err := address.IDFromAddress(miner.ActorAddr)
+    require.NoError(t, err)
+
+    allocationRequest := verifregst.AllocationRequest{
+        Provider:   abi.ActorID(minerId),
+        Data:       cid.MustParse("bafkqaaa"),
+        Size:       abi.PaddedPieceSize(initialDatacap.Uint64()),
+        TermMin:    verifregst.MinimumVerifiedAllocationTerm,
+        TermMax:    verifregst.MinimumVerifiedAllocationTerm,
+        Expiration: verifregst.MaximumVerifiedAllocationExpiration,
+    }
+
+    allocationRequests := verifregst.AllocationRequests{
+        Allocations: []verifregst.AllocationRequest{allocationRequest},
+    }
+
+    receiverParams, err := actors.SerializeParams(&allocationRequests)
+    require.NoError(t, err)
+
+    transferParams, err := actors.SerializeParams(&datacap2.TransferParams{
+        To:           builtin.VerifiedRegistryActorAddr,
+        Amount:       big.Mul(initialDatacap, builtin.TokenPrecision),
+        OperatorData: receiverParams,
+    })
+    require.NoError(t, err)
+
+    msg = &types.Message{
+        To:     builtin.DatacapActorAddr,
+        From:   verifiedClientAddr,
+        Method: datacap.Methods.TransferExported,
+        Params: transferParams,
+        Value:  big.Zero(),
+    }
+
+    sm, err = api.MpoolPushMessage(ctx, msg, nil)
+    require.NoError(t, err)
+
+    res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+    require.NoError(t, err)
+    require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+    // check datacap balance
+    dcap, err = api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+    require.NoError(t, err)
+    require.Nil(t, dcap)
+
+    allocations, err := api.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK)
+    require.NoError(t, err)
+
+    require.Equal(t, 1, len(allocations))
+}
+
+func makeVerifier(ctx context.Context, t *testing.T, api *impl.FullNodeAPI, rootAddr address.Address, addr address.Address) {
+    allowance := big.NewInt(100000000000)
+    params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
+    require.NoError(t, aerr)
+
+    msg := &types.Message{
+        From:   rootAddr,
+        To:     verifreg.Address,
+        Method: verifreg.Methods.AddVerifier,
+        Params: params,
+        Value:  big.Zero(),
+    }
+
+    sm, err := api.MpoolPushMessage(ctx, msg, nil)
+    require.NoError(t, err, "AddVerifier failed")
+
+    res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+    require.NoError(t, err)
+    require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+    verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
+    require.NoError(t, err)
+    require.Equal(t, allowance, *verifierAllowance)
+}
@@ -730,3 +730,82 @@ waitForProof:
     require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
     require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
 }
+
+func TestWorkerPledgeExpireCommit(t *testing.T) {
+    kit.QuietMiningLogs()
+    _ = logging.SetLogLevel("sectors", "debug")
+
+    var tasksNoC2 = kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTDataCid, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2,
+        sealtasks.TTUnseal, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed})
+
+    fc := config.DefaultStorageMiner().Fees
+    fc.MaxCommitGasFee = types.FIL(abi.NewTokenAmount(10000)) // 10000 attofil, way too low for anything to land
+
+    ctx := context.Background()
+    client, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
+        kit.MutateSealingConfig(func(sc *config.SealingConfig) {
+            sc.AggregateCommits = true
+        }),
+        kit.ConstructorOpts(
+            node.Override(new(*sealing.Sealing), modules.SealingPipeline(fc)),
+        ),
+        kit.SplitstoreDisable(), // disable splitstore because messages which take a long time may get dropped
+        tasksNoC2) // no mock proofs
+
+    ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)
+
+    e, err := worker.Enabled(ctx)
+    require.NoError(t, err)
+    require.True(t, e)
+
+    dh := kit.NewDealHarness(t, client, miner, miner)
+
+    startEpoch := abi.ChainEpoch(4 << 10)
+
+    dh.StartRandomDeal(ctx, kit.MakeFullDealParams{
+        Rseed:      7,
+        StartEpoch: startEpoch,
+    })
+
+    var sn abi.SectorNumber
+
+    require.Eventually(t, func() bool {
+        s, err := miner.SectorsListNonGenesis(ctx)
+        require.NoError(t, err)
+        if len(s) == 0 {
+            return false
+        }
+        if len(s) > 1 {
+            t.Fatalf("expected 1 sector, got %d", len(s))
+        }
+        sn = s[0]
+        return true
+    }, 30*time.Second, 1*time.Second)
+
+    t.Log("sector", sn)
+
+    t.Log("sector committing")
+
+    // wait until after startEpoch
+    client.WaitTillChain(ctx, kit.HeightAtLeast(startEpoch+20))
+
+    t.Log("after start")
+
+    sstate, err := miner.SectorsStatus(ctx, sn, false)
+    require.NoError(t, err)
+    require.Equal(t, api.SectorState(sealing.SubmitCommitAggregate), sstate.State)
+
+    _, err = miner.SectorCommitFlush(ctx)
+    require.NoError(t, err)
+
+    require.Eventually(t, func() bool {
+        sstate, err := miner.SectorsStatus(ctx, sn, false)
+        require.NoError(t, err)
+
+        t.Logf("sector state: %s", sstate.State)
+
+        return sstate.State == api.SectorState(sealing.Removed)
+    }, 30*time.Second, 1*time.Second)
+
+    t.Log("sector removed")
+}
@@ -48,7 +48,6 @@ type Wrapper struct {
     dagst      dagstore.Interface
     minerAPI   MinerAPI
     failureCh  chan dagstore.ShardResult
-    traceCh    chan dagstore.Trace
     gcInterval time.Duration
 }

@@ -64,9 +63,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
     // The dagstore will write Shard failures to the `failureCh` here.
     failureCh := make(chan dagstore.ShardResult, 1)

-    // The dagstore will write Trace events to the `traceCh` here.
-    traceCh := make(chan dagstore.Trace, 32)
-
     var (
         transientsDir = filepath.Join(cfg.RootDir, "transients")
         datastoreDir  = filepath.Join(cfg.RootDir, "datastore")
@@ -90,7 +86,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
         Datastore:     dstore,
         MountRegistry: registry,
         FailureCh:     failureCh,
-        TraceCh:       traceCh,
         TopLevelIndex: topIndex,
         // not limiting fetches globally, as the Lotus mount does
         // conditional throttling.
@@ -109,7 +104,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
         dagst:      dagst,
        minerAPI:   minerApi,
         failureCh:  failureCh,
-        traceCh:    traceCh,
         gcInterval: time.Duration(cfg.GCInterval),
     }

@@ -146,10 +140,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
     w.backgroundWg.Add(1)
     go w.gcLoop()

-    // run a go-routine to read the trace for debugging.
-    w.backgroundWg.Add(1)
-    go w.traceLoop()
-
     // Run a go-routine for shard recovery
     if dss, ok := w.dagst.(*dagstore.DAGStore); ok {
         w.backgroundWg.Add(1)
@@ -159,24 +149,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
     return w.dagst.Start(ctx)
 }

-func (w *Wrapper) traceLoop() {
-    defer w.backgroundWg.Done()
-
-    for w.ctx.Err() == nil {
-        select {
-        // Log trace events from the DAG store
-        case tr := <-w.traceCh:
-            log.Debugw("trace",
-                "shard-key", tr.Key.String(),
-                "op-type", tr.Op.String(),
-                "after", tr.After.String())
-
-        case <-w.ctx.Done():
-            return
-        }
-    }
-}
-
 func (w *Wrapper) gcLoop() {
     defer w.backgroundWg.Done()

@@ -116,6 +116,7 @@ var (
     PubsubDeliverMessage   = stats.Int64("pubsub/delivered", "Counter for total delivered messages", stats.UnitDimensionless)
     PubsubRejectMessage    = stats.Int64("pubsub/rejected", "Counter for total rejected messages", stats.UnitDimensionless)
     PubsubDuplicateMessage = stats.Int64("pubsub/duplicate", "Counter for total duplicate messages", stats.UnitDimensionless)
+    PubsubPruneMessage     = stats.Int64("pubsub/prune", "Counter for total prune messages", stats.UnitDimensionless)
     PubsubRecvRPC          = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless)
     PubsubSendRPC          = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless)
     PubsubDropRPC          = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless)
@@ -326,6 +327,10 @@ var (
         Measure:     PubsubDuplicateMessage,
         Aggregation: view.Count(),
     }
+    PubsubPruneMessageView = &view.View{
+        Measure:     PubsubPruneMessage,
+        Aggregation: view.Count(),
+    }
     PubsubRecvRPCView = &view.View{
         Measure:     PubsubRecvRPC,
         Aggregation: view.Count(),
@@ -760,6 +765,7 @@ var ChainNodeViews = append([]*view.View{
     PubsubDeliverMessageView,
     PubsubRejectMessageView,
     PubsubDuplicateMessageView,
+    PubsubPruneMessageView,
     PubsubRecvRPCView,
     PubsubSendRPCView,
     PubsubDropRPCView,
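A new counter only shows up in exported metrics once its view is registered (done here via `ChainNodeViews`) and something records to it. A hedged sketch of the recording side using the opencensus API; the package and function names are illustrative, and which pubsub tracer hook actually calls this is wired up elsewhere in lotus:

```go
package tracerhook // hypothetical package, for illustration only

import (
	"context"

	"go.opencensus.io/stats"

	"github.com/filecoin-project/lotus/metrics"
)

// recordPrune bumps the new pubsub/prune counter by one; a pubsub
// tracer observing PRUNE control messages would call this per event.
func recordPrune(ctx context.Context) {
	stats.Record(ctx, metrics.PubsubPruneMessage.M(1))
}
```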
@@ -11,6 +11,7 @@ import (
     "time"

     "github.com/hashicorp/golang-lru/arc/v2"
+    "github.com/ipfs/go-cid"
     logging "github.com/ipfs/go-log/v2"
     "go.opencensus.io/trace"
     "golang.org/x/xerrors"
@@ -373,8 +374,9 @@ minerLoop:
 // MiningBase is the tipset on top of which we plan to construct our next block.
 // Refer to godocs on GetBestMiningCandidate.
 type MiningBase struct {
     TipSet      *types.TipSet
-    NullRounds  abi.ChainEpoch
+    ComputeTime time.Time
+    NullRounds  abi.ChainEpoch
 }

 // GetBestMiningCandidate implements the fork choice rule from a miner's
@@ -412,7 +414,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error)
         }
     }

-    m.lastWork = &MiningBase{TipSet: bts}
+    m.lastWork = &MiningBase{TipSet: bts, ComputeTime: time.Now()}
     return m.lastWork, nil
 }

@@ -507,13 +509,13 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
         rbase = bvals[len(bvals)-1]
     }

-    ticket, err := m.computeTicket(ctx, &rbase, base, mbi)
+    ticket, err := m.computeTicket(ctx, &rbase, round, base.TipSet.MinTicket(), mbi)
     if err != nil {
         err = xerrors.Errorf("scratching ticket failed: %w", err)
         return nil, err
     }

-    winner, err = gen.IsRoundWinner(ctx, base.TipSet, round, m.address, rbase, mbi, m.api)
+    winner, err = gen.IsRoundWinner(ctx, round, m.address, rbase, mbi, m.api)
     if err != nil {
         err = xerrors.Errorf("failed to check if we win next round: %w", err)
         return nil, err
@@ -554,12 +556,71 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
     tProof := build.Clock.Now()

     // get pending messages early,
-    msgs, err := m.api.MpoolSelect(context.TODO(), base.TipSet.Key(), ticket.Quality())
+    msgs, err := m.api.MpoolSelect(ctx, base.TipSet.Key(), ticket.Quality())
     if err != nil {
         err = xerrors.Errorf("failed to select messages for block: %w", err)
         return nil, err
     }

+    tEquivocateWait := build.Clock.Now()
+
+    // This next block exists to "catch" equivocating miners,
+    // who submit 2 blocks at the same height at different times in order to split the network.
+    // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated,
+    // then re-calculate it.
+    // If the daemon detected equivocated blocks, those blocks will no longer be in the new base.
+    m.niceSleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second)))
+    newBase, err := m.GetBestMiningCandidate(ctx)
+    if err != nil {
+        err = xerrors.Errorf("failed to refresh best mining candidate: %w", err)
+        return nil, err
+    }
+
+    // If the base has changed, we take the _intersection_ of our old base and new base,
+    // thus ejecting blocks from any equivocating miners, without taking any new blocks.
+    if newBase.TipSet.Height() == base.TipSet.Height() && !newBase.TipSet.Equals(base.TipSet) {
+        log.Warnf("base changed from %s to %s, taking intersection", base.TipSet.Key(), newBase.TipSet.Key())
+        newBaseMap := map[cid.Cid]struct{}{}
+        for _, newBaseBlk := range newBase.TipSet.Cids() {
+            newBaseMap[newBaseBlk] = struct{}{}
+        }
+
+        refreshedBaseBlocks := make([]*types.BlockHeader, 0, len(base.TipSet.Cids()))
+        for _, baseBlk := range base.TipSet.Blocks() {
+            if _, ok := newBaseMap[baseBlk.Cid()]; ok {
+                refreshedBaseBlocks = append(refreshedBaseBlocks, baseBlk)
+            }
+        }
+
+        if len(refreshedBaseBlocks) != 0 && len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) {
+            refreshedBase, err := types.NewTipSet(refreshedBaseBlocks)
+            if err != nil {
+                err = xerrors.Errorf("failed to create new tipset when refreshing: %w", err)
+                return nil, err
+            }
+
+            if !base.TipSet.MinTicket().Equals(refreshedBase.MinTicket()) {
+                log.Warn("recomputing ticket due to base refresh")
+
+                ticket, err = m.computeTicket(ctx, &rbase, round, refreshedBase.MinTicket(), mbi)
+                if err != nil {
+                    err = xerrors.Errorf("failed to refresh ticket: %w", err)
+                    return nil, err
+                }
+            }
+
+            log.Warn("re-selecting messages due to base refresh")
+            // refresh messages, as the selected messages may no longer be valid
+            msgs, err = m.api.MpoolSelect(ctx, refreshedBase.Key(), ticket.Quality())
+            if err != nil {
+                err = xerrors.Errorf("failed to re-select messages for block: %w", err)
+                return nil, err
+            }
+
+            base.TipSet = refreshedBase
+        }
+    }
+
     tPending := build.Clock.Now()

     // TODO: winning post proof
@@ -582,22 +643,22 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
             "tTicket ", tTicket.Sub(tPowercheck),
             "tSeed ", tSeed.Sub(tTicket),
             "tProof ", tProof.Sub(tSeed),
-            "tPending ", tPending.Sub(tProof),
+            "tEquivocateWait ", tEquivocateWait.Sub(tProof),
+            "tPending ", tPending.Sub(tEquivocateWait),
             "tCreateBlock ", tCreateBlock.Sub(tPending))
     }

     return minedBlock, nil
 }

-func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
+func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, round abi.ChainEpoch, chainRand *types.Ticket, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
     buf := new(bytes.Buffer)
     if err := m.address.MarshalCBOR(buf); err != nil {
         return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
     }

-    round := base.TipSet.Height() + base.NullRounds + 1
     if round > build.UpgradeSmokeHeight {
-        buf.Write(base.TipSet.MinTicket().VRFProof)
+        buf.Write(chainRand.VRFProof)
     }

     input, err := lrand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
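The heart of the equivocation guard is a set intersection over block CIDs: after sleeping out the remainder of `EquivocationDelaySecs`, the miner keeps only those blocks of its original base that survived into the refreshed base, and never adopts blocks that arrived mid-round. If the surviving set has a different min-ticket block, the ticket and message selection are recomputed, as the hunk above shows. A distilled, self-contained sketch of just the intersection step (plain strings stand in for `cid.Cid`):

```go
package main

import "fmt"

// intersectBase keeps only the blocks of the old base that are still
// present in the refreshed base, ejecting equivocated blocks without
// pulling in any blocks that arrived after the round started.
func intersectBase(oldBase, newBase []string) []string {
	inNew := make(map[string]struct{}, len(newBase))
	for _, blk := range newBase {
		inNew[blk] = struct{}{}
	}
	kept := make([]string, 0, len(oldBase))
	for _, blk := range oldBase {
		if _, ok := inNew[blk]; ok {
			kept = append(kept, blk)
		}
	}
	return kept
}

func main() {
	oldBase := []string{"blkA", "blkB", "blkC"} // blkC later proven equivocated
	newBase := []string{"blkA", "blkB", "blkD"} // blkD is new; must not be adopted
	fmt.Println(intersectBase(oldBase, newBase)) // [blkA blkB]
}
```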
@@ -527,7 +527,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor
         return nil, xerrors.Errorf("failed to read CAR header: %w", err)
     }
     if len(hd.Roots) != 1 {
-        return nil, xerrors.New("car file can have one and only one header")
+        return nil, xerrors.New("car file can have one and only one root")
     }
     if hd.Version != 1 && hd.Version != 2 {
         return nil, xerrors.Errorf("car version must be 1 or 2, is %d", hd.Version)
@@ -178,5 +178,13 @@ func (e *EthModuleDummy) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubs
     return false, ErrModuleDisabled
 }

+func (e *EthModuleDummy) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+    return nil, ErrModuleDisabled
+}
+
+func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+    return nil, ErrModuleDisabled
+}
+
 var _ EthModuleAPI = &EthModuleDummy{}
 var _ EthEventAPI = &EthModuleDummy{}
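When the Eth RPC module is disabled, lotus serves the Eth API surface from `EthModuleDummy`, so every new interface method needs a stub that fails uniformly; forgetting one would break the `var _ EthModuleAPI` assertion at compile time rather than a production call. A generic, self-contained sketch of the disabled-module pattern:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var ErrModuleDisabled = errors.New("module disabled, enable with config")

// API is the surface callers compile against.
type API interface {
	TraceBlock(ctx context.Context, blkNum string) ([]string, error)
}

// Dummy keeps the RPC surface intact while the feature is off:
// every method exists and returns the same sentinel error.
type Dummy struct{}

func (Dummy) TraceBlock(context.Context, string) ([]string, error) {
	return nil, ErrModuleDisabled
}

// Compile-time check: adding a method to API without stubbing it
// in Dummy fails the build, not a live RPC call.
var _ API = Dummy{}

func main() {
	_, err := Dummy{}.TraceBlock(context.Background(), "latest")
	fmt.Println(err) // module disabled, enable with config
}
```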
@ -825,11 +825,10 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
 		}

 		rewards, totalGasUsed := calculateRewardsAndGasUsed(rewardPercentiles, txGasRewards)
-		maxGas := build.BlockGasLimit * int64(len(ts.Blocks()))

 		// arrays should be reversed at the end
 		baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(basefee))
-		gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(maxGas))
+		gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(build.BlockGasLimit))
 		rewardsArray = append(rewardsArray, rewards)
 		oldestBlkHeight = uint64(ts.Height())
 		blocksIncluded++
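The `EthFeeHistory` hunk changes the denominator of the gas-used ratio from the tipset-wide limit (block gas limit times the number of blocks in the tipset) to the single-block gas limit, so the reported ratio is relative to one Ethereum-style block and can now exceed 1.0 for a busy multi-block tipset. A self-contained sketch of the arithmetic with made-up numbers (the 10B constant matches Lotus's `build.BlockGasLimit`; the tipset size and gas used are illustrative):

package main

import "fmt"

func main() {
	blockGasLimit := int64(10_000_000_000) // build.BlockGasLimit
	totalGasUsed := int64(15_000_000_000)  // gas used across the whole tipset
	blocksInTipset := 5

	oldRatio := float64(totalGasUsed) / float64(blockGasLimit*int64(blocksInTipset))
	newRatio := float64(totalGasUsed) / float64(blockGasLimit)

	fmt.Printf("old: %.2f, new: %.2f\n", oldRatio, newRatio) // old: 0.30, new: 1.50
}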
@ -2278,13 +2277,7 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook
 		return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
 	}

-	// The tx is located in the parent tipset
-	parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
-	if err != nil {
-		return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err)
-	}
-
-	baseFee := parentTs.Blocks()[0].ParentBaseFee
+	baseFee := ts.Blocks()[0].ParentBaseFee

 	gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
 	totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)
382 node/impl/full/eth_event.go Normal file
@ -0,0 +1,382 @@
package full

import (
	"context"
	"encoding/json"
	"sync"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	"github.com/zyedidia/generic/queue"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-jsonrpc"

	"github.com/filecoin-project/lotus/chain/events/filter"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

type filterEventCollector interface {
	TakeCollectedEvents(context.Context) []*filter.CollectedEvent
}

type filterMessageCollector interface {
	TakeCollectedMessages(context.Context) []*types.SignedMessage
}

type filterTipSetCollector interface {
	TakeCollectedTipSets(context.Context) []types.TipSetKey
}

func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
	var (
		topicsFound      [4]bool
		topicsFoundCount int
		dataFound        bool
	)
	// Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max).
	topics = make([]ethtypes.EthHash, 0, 4)
	for _, entry := range entries {
		// Drop events with non-raw topics to avoid mistakes.
		if entry.Codec != cid.Raw {
			log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key)
			return nil, nil, false
		}
		// Check if the key is t1..t4
		if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
			// '1' - '1' == 0, etc.
			idx := int(entry.Key[1] - '1')

			// Drop events with mis-sized topics.
			if len(entry.Value) != 32 {
				log.Warnw("got an EVM event topic with an invalid size", "key", entry.Key, "size", len(entry.Value))
				return nil, nil, false
			}

			// Drop events with duplicate topics.
			if topicsFound[idx] {
				log.Warnw("got a duplicate EVM event topic", "key", entry.Key)
				return nil, nil, false
			}
			topicsFound[idx] = true
			topicsFoundCount++

			// Extend the topics array
			for len(topics) <= idx {
				topics = append(topics, ethtypes.EthHash{})
			}
			copy(topics[idx][:], entry.Value)
		} else if entry.Key == "d" {
			// Drop events with duplicate data fields.
			if dataFound {
				log.Warnw("got duplicate EVM event data")
				return nil, nil, false
			}

			dataFound = true
			data = entry.Value
		} else {
			// Skip entries we don't understand (makes it easier to extend things).
			// But we warn for now because we don't expect them.
			log.Warnw("unexpected event entry", "key", entry.Key)
		}

	}

	// Drop events with skipped topics.
	if len(topics) != topicsFoundCount {
		log.Warnw("EVM event topic length mismatch", "expected", len(topics), "actual", topicsFoundCount)
		return nil, nil, false
	}
	return data, topics, true
}

func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
	res := &ethtypes.EthFilterResult{}
	for _, ev := range evs {
		log := ethtypes.EthLog{
			Removed:          ev.Reverted,
			LogIndex:         ethtypes.EthUint64(ev.EventIdx),
			TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
			BlockNumber:      ethtypes.EthUint64(ev.Height),
		}
		var (
			err error
			ok  bool
		)

		log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
		if !ok {
			continue
		}

		log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
		if err != nil {
			return nil, err
		}

		log.TransactionHash, err = ethTxHashFromMessageCid(context.TODO(), ev.MsgCid, sa)
		if err != nil {
			return nil, err
		}
		c, err := ev.TipSetKey.Cid()
		if err != nil {
			return nil, err
		}
		log.BlockHash, err = ethtypes.EthHashFromCid(c)
		if err != nil {
			return nil, err
		}

		res.Results = append(res.Results, log)
	}

	return res, nil
}

func ethFilterResultFromTipSets(tsks []types.TipSetKey) (*ethtypes.EthFilterResult, error) {
	res := &ethtypes.EthFilterResult{}

	for _, tsk := range tsks {
		c, err := tsk.Cid()
		if err != nil {
			return nil, err
		}
		hash, err := ethtypes.EthHashFromCid(c)
		if err != nil {
			return nil, err
		}

		res.Results = append(res.Results, hash)
	}

	return res, nil
}

func ethFilterResultFromMessages(cs []*types.SignedMessage, sa StateAPI) (*ethtypes.EthFilterResult, error) {
	res := &ethtypes.EthFilterResult{}

	for _, c := range cs {
		hash, err := ethTxHashFromSignedMessage(context.TODO(), c, sa)
		if err != nil {
			return nil, err
		}

		res.Results = append(res.Results, hash)
	}

	return res, nil
}

type EthSubscriptionManager struct {
	Chain    *store.ChainStore
	StateAPI StateAPI
	ChainAPI ChainAPI
	mu       sync.Mutex
	subs     map[ethtypes.EthSubscriptionID]*ethSubscription
}

func (e *EthSubscriptionManager) StartSubscription(ctx context.Context, out ethSubscriptionCallback, dropFilter func(context.Context, filter.Filter) error) (*ethSubscription, error) { // nolint
	rawid, err := uuid.NewRandom()
	if err != nil {
		return nil, xerrors.Errorf("new uuid: %w", err)
	}
	id := ethtypes.EthSubscriptionID{}
	copy(id[:], rawid[:]) // uuid is 16 bytes

	ctx, quit := context.WithCancel(ctx)

	sub := &ethSubscription{
		Chain:           e.Chain,
		StateAPI:        e.StateAPI,
		ChainAPI:        e.ChainAPI,
		uninstallFilter: dropFilter,
		id:              id,
		in:              make(chan interface{}, 200),
		out:             out,
		quit:            quit,

		toSend:   queue.New[[]byte](),
		sendCond: make(chan struct{}, 1),
	}

	e.mu.Lock()
	if e.subs == nil {
		e.subs = make(map[ethtypes.EthSubscriptionID]*ethSubscription)
	}
	e.subs[sub.id] = sub
	e.mu.Unlock()

	go sub.start(ctx)
	go sub.startOut(ctx)

	return sub, nil
}

func (e *EthSubscriptionManager) StopSubscription(ctx context.Context, id ethtypes.EthSubscriptionID) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	sub, ok := e.subs[id]
	if !ok {
		return xerrors.Errorf("subscription not found")
	}
	sub.stop()
	delete(e.subs, id)

	return nil
}

type ethSubscriptionCallback func(context.Context, jsonrpc.RawParams) error

const maxSendQueue = 20000

type ethSubscription struct {
	Chain           *store.ChainStore
	StateAPI        StateAPI
	ChainAPI        ChainAPI
	uninstallFilter func(context.Context, filter.Filter) error
	id              ethtypes.EthSubscriptionID
	in              chan interface{}
	out             ethSubscriptionCallback

	mu      sync.Mutex
	filters []filter.Filter
	quit    func()

	sendLk       sync.Mutex
	sendQueueLen int
	toSend       *queue.Queue[[]byte]
	sendCond     chan struct{}
}

func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) {
	e.mu.Lock()
	defer e.mu.Unlock()

	f.SetSubChannel(e.in)
	e.filters = append(e.filters, f)
}

// sendOut processes the final subscription queue. It's here in case the subscriber
// is slow, and we need to buffer the messages.
func (e *ethSubscription) startOut(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-e.sendCond:
			e.sendLk.Lock()

			for !e.toSend.Empty() {
				front := e.toSend.Dequeue()
				e.sendQueueLen--

				e.sendLk.Unlock()

				if err := e.out(ctx, front); err != nil {
					log.Warnw("error sending subscription response, killing subscription", "sub", e.id, "error", err)
					e.stop()
					return
				}

				e.sendLk.Lock()
			}

			e.sendLk.Unlock()
		}
	}
}

func (e *ethSubscription) send(ctx context.Context, v interface{}) {
	resp := ethtypes.EthSubscriptionResponse{
		SubscriptionID: e.id,
		Result:         v,
	}

	outParam, err := json.Marshal(resp)
	if err != nil {
		log.Warnw("marshaling subscription response", "sub", e.id, "error", err)
		return
	}

	e.sendLk.Lock()
	defer e.sendLk.Unlock()

	e.toSend.Enqueue(outParam)

	e.sendQueueLen++
	if e.sendQueueLen > maxSendQueue {
		log.Warnw("subscription send queue full, killing subscription", "sub", e.id)
		e.stop()
		return
	}

	select {
	case e.sendCond <- struct{}{}:
	default: // already signalled, and we're holding the lock so we know that the event will be processed
	}
}

func (e *ethSubscription) start(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case v := <-e.in:
			switch vt := v.(type) {
			case *filter.CollectedEvent:
				evs, err := ethFilterResultFromEvents([]*filter.CollectedEvent{vt}, e.StateAPI)
				if err != nil {
					continue
				}

				for _, r := range evs.Results {
					e.send(ctx, r)
				}
			case *types.TipSet:
				ev, err := newEthBlockFromFilecoinTipSet(ctx, vt, true, e.Chain, e.StateAPI)
				if err != nil {
					break
				}

				e.send(ctx, ev)
			case *types.SignedMessage: // mpool txid
				evs, err := ethFilterResultFromMessages([]*types.SignedMessage{vt}, e.StateAPI)
				if err != nil {
					continue
				}

				for _, r := range evs.Results {
					e.send(ctx, r)
				}
			default:
				log.Warnf("unexpected subscription value type: %T", vt)
			}
		}
	}
}

func (e *ethSubscription) stop() {
	e.mu.Lock()
	if e.quit == nil {
		e.mu.Unlock()
		return
	}

	if e.quit != nil {
		e.quit()
		e.quit = nil
		e.mu.Unlock()

		for _, f := range e.filters {
			// note: the context in actually unused in uninstallFilter
			if err := e.uninstallFilter(context.TODO(), f); err != nil {
				// this will leave the filter a zombie, collecting events up to the maximum allowed
				log.Warnf("failed to remove filter when unsubscribing: %v", err)
			}
		}
	}
}
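The new `ethSubscription` decouples event producers from a possibly slow subscriber: `send` enqueues onto an unbounded queue (killed at `maxSendQueue`) and signals a 1-buffered channel so it never blocks, while `startOut` drains the queue, releasing the lock around each delivery. A standalone sketch of just that queue-plus-coalesced-signal pattern, with the Lotus types replaced by plain slices (the `sender` type and its fields are illustrative, not names from this file):

package main

import (
	"fmt"
	"sync"
)

type sender struct {
	mu        sync.Mutex
	toSend    [][]byte       // stand-in for the generic queue in eth_event.go
	cond      chan struct{}  // capacity 1: concurrent signals coalesce
	delivered sync.WaitGroup // bookkeeping for this sketch only
}

func (s *sender) send(v []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.toSend = append(s.toSend, v)
	select {
	case s.cond <- struct{}{}:
	default: // already signalled; the drain loop will pick this item up too
	}
}

func (s *sender) drain() {
	for range s.cond {
		s.mu.Lock()
		for len(s.toSend) > 0 {
			front := s.toSend[0]
			s.toSend = s.toSend[1:]
			s.mu.Unlock() // deliver without holding the lock, like startOut
			fmt.Printf("%s\n", front)
			s.delivered.Done()
			s.mu.Lock()
		}
		s.mu.Unlock()
	}
}

func main() {
	s := &sender{cond: make(chan struct{}, 1)}
	s.delivered.Add(2)
	go s.drain()
	s.send([]byte("hello"))
	s.send([]byte("world"))
	s.delivered.Wait()
}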
353 node/impl/full/eth_trace.go Normal file
@ -0,0 +1,353 @@
package full

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/multiformats/go-multicodec"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/builtin/v10/evm"
	"github.com/filecoin-project/go-state-types/exitcode"

	builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

// decodePayload is a utility function which decodes the payload using the given codec
func decodePayload(payload []byte, codec uint64) (ethtypes.EthBytes, error) {
	if len(payload) == 0 {
		return nil, nil
	}

	switch multicodec.Code(codec) {
	case multicodec.Identity:
		return nil, nil
	case multicodec.DagCbor, multicodec.Cbor:
		buf, err := cbg.ReadByteArray(bytes.NewReader(payload), uint64(len(payload)))
		if err != nil {
			return nil, xerrors.Errorf("decodePayload: failed to decode cbor payload: %w", err)
		}
		return buf, nil
	case multicodec.Raw:
		return ethtypes.EthBytes(payload), nil
	}

	return nil, xerrors.Errorf("decodePayload: unsupported codec: %d", codec)
}

// buildTraces recursively builds the traces for a given ExecutionTrace by walking the subcalls
func buildTraces(ctx context.Context, traces *[]*ethtypes.EthTrace, parent *ethtypes.EthTrace, addr []int, et types.ExecutionTrace, height int64, sa StateAPI) error {
	// lookup the eth address from the from/to addresses. Note that this may fail but to support
	// this we need to include the ActorID in the trace. For now, just log a warning and skip
	// this trace.
	//
	// TODO: Add ActorID in trace, see https://github.com/filecoin-project/lotus/pull/11100#discussion_r1302442288
	from, err := lookupEthAddress(ctx, et.Msg.From, sa)
	if err != nil {
		log.Warnf("buildTraces: failed to lookup from address %s: %v", et.Msg.From, err)
		return nil
	}
	to, err := lookupEthAddress(ctx, et.Msg.To, sa)
	if err != nil {
		log.Warnf("buildTraces: failed to lookup to address %s: %w", et.Msg.To, err)
		return nil
	}

	trace := &ethtypes.EthTrace{
		Action: ethtypes.EthTraceAction{
			From:  from,
			To:    to,
			Gas:   ethtypes.EthUint64(et.Msg.GasLimit),
			Input: nil,
			Value: ethtypes.EthBigInt(et.Msg.Value),

			FilecoinFrom:    et.Msg.From,
			FilecoinTo:      et.Msg.To,
			FilecoinMethod:  et.Msg.Method,
			FilecoinCodeCid: et.Msg.CodeCid,
		},
		Result: ethtypes.EthTraceResult{
			GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas),
			Output:  nil,
		},
		Subtraces:    0, // will be updated by the children once they are added to the trace
		TraceAddress: addr,

		Parent:       parent,
		LastByteCode: nil,
	}

	trace.SetCallType("call")

	if et.Msg.Method == builtin.MethodsEVM.InvokeContract {
		log.Debugf("COND1 found InvokeContract call at height: %d", height)

		// TODO: ignore return errors since actors can send gibberish and we don't want
		// to fail the whole trace in that case
		trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
		if err != nil {
			return xerrors.Errorf("buildTraces: %w", err)
		}
		trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
		if err != nil {
			return xerrors.Errorf("buildTraces: %w", err)
		}
	} else if et.Msg.To == builtin.EthereumAddressManagerActorAddr &&
		et.Msg.Method == builtin.MethodsEAM.CreateExternal {
		log.Debugf("COND2 found CreateExternal call at height: %d", height)
		trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
		if err != nil {
			return xerrors.Errorf("buildTraces: %w", err)
		}

		if et.MsgRct.ExitCode.IsSuccess() {
			// ignore return value
			trace.Result.Output = nil
		} else {
			// return value is the error message
			trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
			if err != nil {
				return xerrors.Errorf("buildTraces: %w", err)
			}
		}

		// treat this as a contract creation
		trace.SetCallType("create")
	} else {
		// we are going to assume a native method, but we may change it in one of the edge cases below
		// TODO: only do this if we know it's a native method (optimization)
		trace.Action.Input, err = handleFilecoinMethodInput(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params)
		if err != nil {
			return xerrors.Errorf("buildTraces: %w", err)
		}
		trace.Result.Output, err = handleFilecoinMethodOutput(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return)
		if err != nil {
			return xerrors.Errorf("buildTraces: %w", err)
		}
	}

	// TODO: is it OK to check this here or is this only specific to certain edge case (evm to evm)?
	if et.Msg.ReadOnly {
		trace.SetCallType("staticcall")
	}

	// there are several edge cases that require special handling when displaying the traces. Note that while iterating over
	// the traces we update the trace backwards (through the parent pointer)
	if parent != nil {
		// Handle Native actor creation
		//
		// Actor A calls to the init actor on method 2 and The init actor creates the target actor B then calls it on method 1
		if parent.Action.FilecoinTo == builtin.InitActorAddr &&
			parent.Action.FilecoinMethod == builtin.MethodsInit.Exec &&
			et.Msg.Method == builtin.MethodConstructor {
			log.Debugf("COND3 Native actor creation! method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)
			parent.SetCallType("create")
			parent.Action.To = to
			parent.Action.Input = []byte{0xFE}
			parent.Result.Output = nil

			// there should never be any subcalls when creating a native actor
			//
			// TODO: add support for native actors calling another when created
			return nil
		}

		// Handle EVM contract creation
		//
		// To detect EVM contract creation we need to check for the following sequence of events:
		//
		// 1) EVM contract A calls the EAM (Ethereum Address Manager) on method 2 (create) or 3 (create2).
		// 2) The EAM calls the init actor on method 3 (Exec4).
		// 3) The init actor creates the target actor B then calls it on method 1.
		if parent.Parent != nil {
			calledCreateOnEAM := parent.Parent.Action.FilecoinTo == builtin.EthereumAddressManagerActorAddr &&
				(parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create || parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create2)
			eamCalledInitOnExec4 := parent.Action.FilecoinTo == builtin.InitActorAddr &&
				parent.Action.FilecoinMethod == builtin.MethodsInit.Exec4
			initCreatedActor := trace.Action.FilecoinMethod == builtin.MethodConstructor

			// TODO: We need to handle failures in contract creations and support resurrections on an existing but dead EVM actor)
			if calledCreateOnEAM && eamCalledInitOnExec4 && initCreatedActor {
				log.Debugf("COND4 EVM contract creation method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)

				if parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create {
					parent.Parent.SetCallType("create")
				} else {
					parent.Parent.SetCallType("create2")
				}

				// update the parent.parent to make this
				parent.Parent.Action.To = trace.Action.To
				parent.Parent.Subtraces = 0

				// delete the parent (the EAM) and skip the current trace (init)
				*traces = (*traces)[:len(*traces)-1]

				return nil
			}
		}

		if builtinactors.IsEvmActor(parent.Action.FilecoinCodeCid) {
			// Handle delegate calls
			//
			// 1) Look for trace from an EVM actor to itself on InvokeContractDelegate, method 6.
			// 2) Check that the previous trace calls another actor on method 3 (GetByteCode) and they are at the same level (same parent)
			// 3) Treat this as a delegate call to actor A.
			if parent.LastByteCode != nil && trace.Action.From == trace.Action.To &&
				trace.Action.FilecoinMethod == builtin.MethodsEVM.InvokeContractDelegate {
				log.Debugf("COND7 found delegate call, height: %d", height)
				prev := parent.LastByteCode
				if prev.Action.From == trace.Action.From && prev.Action.FilecoinMethod == builtin.MethodsEVM.GetBytecode && prev.Parent == trace.Parent {
					trace.SetCallType("delegatecall")
					trace.Action.To = prev.Action.To

					var dp evm.DelegateCallParams
					err := dp.UnmarshalCBOR(bytes.NewReader(et.Msg.Params))
					if err != nil {
						return xerrors.Errorf("failed UnmarshalCBOR: %w", err)
					}
					trace.Action.Input = dp.Input

					trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
					if err != nil {
						return xerrors.Errorf("failed decodePayload: %w", err)
					}
				}
			} else {
				// Handle EVM call special casing
				//
				// Any outbound call from an EVM actor on methods 1-1023 are side-effects from EVM instructions
				// and should be dropped from the trace.
				if et.Msg.Method > 0 &&
					et.Msg.Method <= 1023 {
					log.Debugf("Infof found outbound call from an EVM actor on method 1-1023 method:%d, code:%s, height:%d", et.Msg.Method, parent.Action.FilecoinCodeCid.String(), height)

					if et.Msg.Method == builtin.MethodsEVM.GetBytecode {
						// save the last bytecode trace to handle delegate calls
						parent.LastByteCode = trace
					}

					return nil
				}
			}
		}

	}

	// we are adding trace to the traces so update the parent subtraces count as it was originally set to zero
	if parent != nil {
		parent.Subtraces++
	}

	*traces = append(*traces, trace)

	for i, call := range et.Subcalls {
		err := buildTraces(ctx, traces, trace, append(addr, i), call, height, sa)
		if err != nil {
			return err
		}
	}

	return nil
}

func writePadded(w io.Writer, data any, size int) error {
	tmp := &bytes.Buffer{}

	// first write data to tmp buffer to get the size
	err := binary.Write(tmp, binary.BigEndian, data)
	if err != nil {
		return fmt.Errorf("writePadded: failed writing tmp data to buffer: %w", err)
	}

	if tmp.Len() > size {
		return fmt.Errorf("writePadded: data is larger than size")
	}

	// write tailing zeros to pad up to size
	cnt := size - tmp.Len()
	for i := 0; i < cnt; i++ {
		err = binary.Write(w, binary.BigEndian, uint8(0))
		if err != nil {
			return fmt.Errorf("writePadded: failed writing tailing zeros to buffer: %w", err)
		}
	}

	// finally write the actual value
	err = binary.Write(w, binary.BigEndian, tmp.Bytes())
	if err != nil {
		return fmt.Errorf("writePadded: failed writing data to buffer: %w", err)
	}

	return nil
}

func handleFilecoinMethodInput(method abi.MethodNum, codec uint64, params []byte) ([]byte, error) {
	NATIVE_METHOD_SELECTOR := []byte{0x86, 0x8e, 0x10, 0xc4}
	EVM_WORD_SIZE := 32

	staticArgs := []uint64{
		uint64(method),
		codec,
		uint64(EVM_WORD_SIZE) * 3,
		uint64(len(params)),
	}
	totalWords := len(staticArgs) + (len(params) / EVM_WORD_SIZE)
	if len(params)%EVM_WORD_SIZE != 0 {
		totalWords++
	}
	len := 4 + totalWords*EVM_WORD_SIZE

	w := &bytes.Buffer{}
	err := binary.Write(w, binary.BigEndian, NATIVE_METHOD_SELECTOR)
	if err != nil {
		return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing method selector: %w", err)
	}

	for _, arg := range staticArgs {
		err := writePadded(w, arg, 32)
		if err != nil {
			return nil, fmt.Errorf("handleFilecoinMethodInput: %w", err)
		}
	}
	err = binary.Write(w, binary.BigEndian, params)
	if err != nil {
		return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing params: %w", err)
	}
	remain := len - w.Len()
	for i := 0; i < remain; i++ {
		err = binary.Write(w, binary.BigEndian, uint8(0))
		if err != nil {
			return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing tailing zeros: %w", err)
		}
	}

	return w.Bytes(), nil
}

func handleFilecoinMethodOutput(exitCode exitcode.ExitCode, codec uint64, data []byte) ([]byte, error) {
	w := &bytes.Buffer{}

	values := []interface{}{uint32(exitCode), codec, uint32(w.Len()), uint32(len(data))}
	for _, v := range values {
		err := writePadded(w, v, 32)
		if err != nil {
			return nil, fmt.Errorf("handleFilecoinMethodOutput: %w", err)
		}
	}

	err := binary.Write(w, binary.BigEndian, data)
	if err != nil {
		return nil, fmt.Errorf("handleFilecoinMethodOutput: failed writing data: %w", err)
	}

	return w.Bytes(), nil
}
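In eth_trace.go above, `handleFilecoinMethodInput` frames a native Filecoin call so EVM tooling can display it: the 4-byte selector 0x868e10c4, four 32-byte big-endian words (method number, params codec, a fixed offset of 96, params length), then the params zero-padded to a word boundary. A standalone sketch of the same layout, simplified to uint64 static words (`frameNativeInput` is an illustrative name, not the function from this file):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frameNativeInput mirrors the layout produced by handleFilecoinMethodInput.
func frameNativeInput(method, codec uint64, params []byte) []byte {
	const wordSize = 32
	w := &bytes.Buffer{}
	w.Write([]byte{0x86, 0x8e, 0x10, 0xc4}) // NATIVE_METHOD_SELECTOR

	for _, arg := range []uint64{method, codec, wordSize * 3, uint64(len(params))} {
		pad := make([]byte, wordSize)
		binary.BigEndian.PutUint64(pad[wordSize-8:], arg) // left-pad each word to 32 bytes
		w.Write(pad)
	}

	w.Write(params)
	if rem := len(params) % wordSize; rem != 0 {
		w.Write(make([]byte, wordSize-rem)) // zero-pad params to a word boundary
	}
	return w.Bytes()
}

func main() {
	out := frameNativeInput(2, 0x51, []byte{0xde, 0xad}) // method 2, CBOR params codec
	fmt.Printf("%d bytes: %x\n", len(out), out)          // 164 bytes: 4 + 4*32 + 32
}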
695
node/impl/full/eth_utils.go
Normal file
695
node/impl/full/eth_utils.go
Normal file
@ -0,0 +1,695 @@
|
|||||||
|
package full
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
builtintypes "github.com/filecoin-project/go-state-types/builtin"
|
||||||
|
"github.com/filecoin-project/go-state-types/builtin/v10/eam"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||||
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getTipsetByBlockNumber(ctx context.Context, chain *store.ChainStore, blkParam string, strict bool) (*types.TipSet, error) {
|
||||||
|
if blkParam == "earliest" {
|
||||||
|
return nil, fmt.Errorf("block param \"earliest\" is not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
head := chain.GetHeaviestTipSet()
|
||||||
|
switch blkParam {
|
||||||
|
case "pending":
|
||||||
|
return head, nil
|
||||||
|
case "latest":
|
||||||
|
parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get parent tipset")
|
||||||
|
}
|
||||||
|
return parent, nil
|
||||||
|
default:
|
||||||
|
var num ethtypes.EthUint64
|
||||||
|
err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot parse block number: %v", err)
|
||||||
|
}
|
||||||
|
if abi.ChainEpoch(num) > head.Height()-1 {
|
||||||
|
return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
|
||||||
|
}
|
||||||
|
ts, err := chain.GetTipsetByHeight(ctx, abi.ChainEpoch(num), head, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get tipset at height: %v", num)
|
||||||
|
}
|
||||||
|
if strict && ts.Height() != abi.ChainEpoch(num) {
|
||||||
|
return nil, ErrNullRound
|
||||||
|
}
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTipsetByEthBlockNumberOrHash(ctx context.Context, chain *store.ChainStore, blkParam ethtypes.EthBlockNumberOrHash) (*types.TipSet, error) {
|
||||||
|
head := chain.GetHeaviestTipSet()
|
||||||
|
|
||||||
|
predefined := blkParam.PredefinedBlock
|
||||||
|
if predefined != nil {
|
||||||
|
if *predefined == "earliest" {
|
||||||
|
return nil, fmt.Errorf("block param \"earliest\" is not supported")
|
||||||
|
} else if *predefined == "pending" {
|
||||||
|
return head, nil
|
||||||
|
} else if *predefined == "latest" {
|
||||||
|
parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get parent tipset")
|
||||||
|
}
|
||||||
|
return parent, nil
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unknown predefined block %s", *predefined)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if blkParam.BlockNumber != nil {
|
||||||
|
height := abi.ChainEpoch(*blkParam.BlockNumber)
|
||||||
|
if height > head.Height()-1 {
|
||||||
|
return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
|
||||||
|
}
|
||||||
|
ts, err := chain.GetTipsetByHeight(ctx, height, head, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get tipset at height: %v", height)
|
||||||
|
}
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if blkParam.BlockHash != nil {
|
||||||
|
ts, err := chain.GetTipSetByCid(ctx, blkParam.BlockHash.ToCid())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get tipset by hash: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify that the tipset is in the canonical chain
|
||||||
|
if blkParam.RequireCanonical {
|
||||||
|
// walk up the current chain (our head) until we reach ts.Height()
|
||||||
|
walkTs, err := chain.GetTipsetByHeight(ctx, ts.Height(), head, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot get tipset at height: %v", ts.Height())
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify that it equals the expected tipset
|
||||||
|
if !walkTs.Equals(ts) {
|
||||||
|
return nil, fmt.Errorf("tipset is not canonical")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("invalid block param")
|
||||||
|
}
|
||||||
|
|
||||||
|
func ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types.Message, error) {
|
||||||
|
var from address.Address
|
||||||
|
if tx.From == nil || *tx.From == (ethtypes.EthAddress{}) {
|
||||||
|
// Send from the filecoin "system" address.
|
||||||
|
var err error
|
||||||
|
from, err = (ethtypes.EthAddress{}).ToFilecoinAddress()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to construct the ethereum system address: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The from address must be translatable to an f4 address.
|
||||||
|
var err error
|
||||||
|
from, err = tx.From.ToFilecoinAddress()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to translate sender address (%s): %w", tx.From.String(), err)
|
||||||
|
}
|
||||||
|
if p := from.Protocol(); p != address.Delegated {
|
||||||
|
return nil, fmt.Errorf("expected a class 4 address, got: %d: %w", p, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var params []byte
|
||||||
|
if len(tx.Data) > 0 {
|
||||||
|
initcode := abi.CborBytes(tx.Data)
|
||||||
|
params2, err := actors.SerializeParams(&initcode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to serialize params: %w", err)
|
||||||
|
}
|
||||||
|
params = params2
|
||||||
|
}
|
||||||
|
|
||||||
|
var to address.Address
|
||||||
|
var method abi.MethodNum
|
||||||
|
if tx.To == nil {
|
||||||
|
// this is a contract creation
|
||||||
|
to = builtintypes.EthereumAddressManagerActorAddr
|
||||||
|
method = builtintypes.MethodsEAM.CreateExternal
|
||||||
|
} else {
|
||||||
|
addr, err := tx.To.ToFilecoinAddress()
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
|
||||||
|
}
|
||||||
|
to = addr
|
||||||
|
method = builtintypes.MethodsEVM.InvokeContract
|
||||||
|
}
|
||||||
|
|
||||||
|
return &types.Message{
|
||||||
|
From: from,
|
||||||
|
To: to,
|
||||||
|
Value: big.Int(tx.Value),
|
||||||
|
Method: method,
|
||||||
|
Params: params,
|
||||||
|
GasLimit: build.BlockGasLimit,
|
||||||
|
GasFeeCap: big.Zero(),
|
||||||
|
GasPremium: big.Zero(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTxInfo bool, cs *store.ChainStore, sa StateAPI) (ethtypes.EthBlock, error) {
|
||||||
|
parentKeyCid, err := ts.Parents().Cid()
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, err
|
||||||
|
}
|
||||||
|
parentBlkHash, err := ethtypes.EthHashFromCid(parentKeyCid)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bn := ethtypes.EthUint64(ts.Height())
|
||||||
|
|
||||||
|
blkCid, err := ts.Key().Cid()
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, err
|
||||||
|
}
|
||||||
|
blkHash, err := ethtypes.EthHashFromCid(blkCid)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
block := ethtypes.NewEthBlock(len(msgs) > 0)
|
||||||
|
|
||||||
|
gasUsed := int64(0)
|
||||||
|
for i, msg := range msgs {
|
||||||
|
rcpt := rcpts[i]
|
||||||
|
ti := ethtypes.EthUint64(i)
|
||||||
|
gasUsed += rcpt.GasUsed
|
||||||
|
var smsg *types.SignedMessage
|
||||||
|
switch msg := msg.(type) {
|
||||||
|
case *types.SignedMessage:
|
||||||
|
smsg = msg
|
||||||
|
case *types.Message:
|
||||||
|
smsg = &types.SignedMessage{
|
||||||
|
Message: *msg,
|
||||||
|
Signature: crypto.Signature{
|
||||||
|
Type: crypto.SigTypeBLS,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
|
||||||
|
}
|
||||||
|
tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
|
||||||
|
tx.BlockHash = &blkHash
|
||||||
|
tx.BlockNumber = &bn
|
||||||
|
tx.TransactionIndex = &ti
|
||||||
|
|
||||||
|
if fullTxInfo {
|
||||||
|
block.Transactions = append(block.Transactions, tx)
|
||||||
|
} else {
|
||||||
|
block.Transactions = append(block.Transactions, tx.Hash.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
block.Hash = blkHash
|
||||||
|
block.Number = bn
|
||||||
|
block.ParentHash = parentBlkHash
|
||||||
|
block.Timestamp = ethtypes.EthUint64(ts.Blocks()[0].Timestamp)
|
||||||
|
block.BaseFeePerGas = ethtypes.EthBigInt{Int: ts.Blocks()[0].ParentBaseFee.Int}
|
||||||
|
block.GasUsed = ethtypes.EthUint64(gasUsed)
|
||||||
|
return block, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
|
||||||
|
msgs, err := cs.MessagesForTipset(ctx, ts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) != len(rcpts) {
|
||||||
|
return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return msgs, rcpts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
|
||||||
|
const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
|
||||||
|
// Eth ABI (solidity) panic codes.
|
||||||
|
var panicErrorCodes map[uint64]string = map[uint64]string{
|
||||||
|
0x00: "Panic()",
|
||||||
|
0x01: "Assert()",
|
||||||
|
0x11: "ArithmeticOverflow()",
|
||||||
|
0x12: "DivideByZero()",
|
||||||
|
0x21: "InvalidEnumVariant()",
|
||||||
|
0x22: "InvalidStorageArray()",
|
||||||
|
0x31: "PopEmptyArray()",
|
||||||
|
0x32: "ArrayIndexOutOfBounds()",
|
||||||
|
0x41: "OutOfMemory()",
|
||||||
|
0x51: "CalledUninitializedFunction()",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse an ABI encoded revert reason. This reason should be encoded as if it were the parameters to
|
||||||
|
// an `Error(string)` function call.
|
||||||
|
//
|
||||||
|
// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
|
||||||
|
func parseEthRevert(ret []byte) string {
|
||||||
|
if len(ret) == 0 {
|
||||||
|
return "none"
|
||||||
|
}
|
||||||
|
var cbytes abi.CborBytes
|
||||||
|
if err := cbytes.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
|
||||||
|
return "ERROR: revert reason is not cbor encoded bytes"
|
||||||
|
}
|
||||||
|
if len(cbytes) == 0 {
|
||||||
|
return "none"
|
||||||
|
}
|
||||||
|
// If it's not long enough to contain an ABI encoded response, return immediately.
|
||||||
|
if len(cbytes) < 4+32 {
|
||||||
|
return ethtypes.EthBytes(cbytes).String()
|
||||||
|
}
|
||||||
|
switch string(cbytes[:4]) {
|
||||||
|
case panicFunctionSelector:
|
||||||
|
cbytes := cbytes[4 : 4+32]
|
||||||
|
// Read the and check the code.
|
||||||
|
code, err := ethtypes.EthUint64FromBytes(cbytes)
|
||||||
|
if err != nil {
|
||||||
|
// If it's too big, just return the raw value.
|
||||||
|
codeInt := big.PositiveFromUnsignedBytes(cbytes)
|
||||||
|
return fmt.Sprintf("Panic(%s)", ethtypes.EthBigInt(codeInt).String())
|
||||||
|
}
|
||||||
|
if s, ok := panicErrorCodes[uint64(code)]; ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("Panic(0x%x)", code)
|
||||||
|
case errorFunctionSelector:
|
||||||
|
cbytes := cbytes[4:]
|
||||||
|
cbytesLen := ethtypes.EthUint64(len(cbytes))
|
||||||
|
// Read the and check the offset.
|
||||||
|
offset, err := ethtypes.EthUint64FromBytes(cbytes[:32])
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cbytesLen < offset {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read and check the length.
|
||||||
|
if cbytesLen-offset < 32 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
start := offset + 32
|
||||||
|
length, err := ethtypes.EthUint64FromBytes(cbytes[offset : offset+32])
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cbytesLen-start < length {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Slice the error message.
|
||||||
|
return fmt.Sprintf("Error(%s)", cbytes[start:start+length])
|
||||||
|
}
|
||||||
|
return ethtypes.EthBytes(cbytes).String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookupEthAddress makes its best effort at finding the Ethereum address for a
|
||||||
|
// Filecoin address. It does the following:
|
||||||
|
//
|
||||||
|
// 1. If the supplied address is an f410 address, we return its payload as the EthAddress.
|
||||||
|
// 2. Otherwise (f0, f1, f2, f3), we look up the actor on the state tree. If it has a delegated address, we return it if it's f410 address.
|
||||||
|
// 3. Otherwise, we fall back to returning a masked ID Ethereum address. If the supplied address is an f0 address, we
|
||||||
|
// use that ID to form the masked ID address.
|
||||||
|
// 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it.
|
||||||
|
func lookupEthAddress(ctx context.Context, addr address.Address, sa StateAPI) (ethtypes.EthAddress, error) {
|
||||||
|
// BLOCK A: We are trying to get an actual Ethereum address from an f410 address.
|
||||||
|
// Attempt to convert directly, if it's an f4 address.
|
||||||
|
ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr)
|
||||||
|
if err == nil && !ethAddr.IsMaskedID() {
|
||||||
|
return ethAddr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup on the target actor and try to get an f410 address.
|
||||||
|
if actor, err := sa.StateGetActor(ctx, addr, types.EmptyTSK); err != nil {
|
||||||
|
return ethtypes.EthAddress{}, err
|
||||||
|
} else if actor.Address != nil {
|
||||||
|
if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
|
||||||
|
return ethAddr, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address.
|
||||||
|
// Check if we already have an ID addr, and use it if possible.
|
||||||
|
if err == nil && ethAddr.IsMaskedID() {
|
||||||
|
return ethAddr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, resolve the ID addr.
|
||||||
|
idAddr, err := sa.StateLookupID(ctx, addr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthAddress{}, err
|
||||||
|
}
|
||||||
|
return ethtypes.EthAddressFromFilecoinAddress(idAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseEthTopics(topics ethtypes.EthTopicSpec) (map[string][][]byte, error) {
|
||||||
|
keys := map[string][][]byte{}
|
||||||
|
for idx, vals := range topics {
|
||||||
|
if len(vals) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Ethereum topics are emitted using `LOG{0..4}` opcodes resulting in topics1..4
|
||||||
|
key := fmt.Sprintf("t%d", idx+1)
|
||||||
|
for _, v := range vals {
|
||||||
|
v := v // copy the ethhash to avoid repeatedly referencing the same one.
|
||||||
|
keys[key] = append(keys[key], v[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ethTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethtypes.EthHash, error) {
|
||||||
|
smsg, err := sa.Chain.GetSignedMessage(ctx, c)
|
||||||
|
if err == nil {
|
||||||
|
// This is an Eth Tx, Secp message, Or BLS message in the mpool
|
||||||
|
return ethTxHashFromSignedMessage(ctx, smsg, sa)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = sa.Chain.GetMessage(ctx, c)
|
||||||
|
if err == nil {
|
||||||
|
// This is a BLS message
|
||||||
|
return ethtypes.EthHashFromCid(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ethtypes.EmptyEthHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ethTxHashFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthHash, error) {
|
||||||
|
if smsg.Signature.Type == crypto.SigTypeDelegated {
|
||||||
|
ethTx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EmptyEthHash, err
|
||||||
|
}
|
||||||
|
return ethTx.Hash, nil
|
||||||
|
} else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
|
||||||
|
return ethtypes.EthHashFromCid(smsg.Cid())
|
||||||
|
} else { // BLS message
|
||||||
|
return ethtypes.EthHashFromCid(smsg.Message.Cid())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEthTxFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthTx, error) {
|
||||||
|
var tx ethtypes.EthTx
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// This is an eth tx
|
||||||
|
if smsg.Signature.Type == crypto.SigTypeDelegated {
|
||||||
|
tx, err = ethtypes.EthTxFromSignedEthMessage(smsg)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.Hash, err = tx.TxHash()
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fromAddr, err := lookupEthAddress(ctx, smsg.Message.From, sa)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.From = fromAddr
|
||||||
|
} else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message
|
||||||
|
tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
|
||||||
|
tx.Hash, err = ethtypes.EthHashFromCid(smsg.Cid())
|
||||||
|
if err != nil {
|
||||||
|
return tx, err
|
||||||
|
}
|
||||||
|
} else { // BLS Filecoin message
|
||||||
|
tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
|
||||||
|
tx.Hash, err = ethtypes.EthHashFromCid(smsg.Message.Cid())
|
||||||
|
if err != nil {
|
||||||
|
return tx, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ethTxFromNativeMessage does NOT populate:
|
||||||
|
// - BlockHash
|
||||||
|
// - BlockNumber
|
||||||
|
// - TransactionIndex
|
||||||
|
// - Hash
|
||||||
|
func ethTxFromNativeMessage(ctx context.Context, msg *types.Message, sa StateAPI) ethtypes.EthTx {
|
||||||
|
// We don't care if we error here, conversion is best effort for non-eth transactions
|
||||||
|
from, _ := lookupEthAddress(ctx, msg.From, sa)
|
||||||
|
to, _ := lookupEthAddress(ctx, msg.To, sa)
|
||||||
|
return ethtypes.EthTx{
|
||||||
|
To: &to,
|
||||||
|
From: from,
|
||||||
|
Nonce: ethtypes.EthUint64(msg.Nonce),
|
||||||
|
ChainID: ethtypes.EthUint64(build.Eip155ChainId),
|
||||||
|
Value: ethtypes.EthBigInt(msg.Value),
|
||||||
|
Type: ethtypes.Eip1559TxType,
|
||||||
|
Gas: ethtypes.EthUint64(msg.GasLimit),
|
||||||
|
MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap),
|
||||||
|
MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium),
|
||||||
|
AccessList: []ethtypes.EthHash{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
|
||||||
|
smsg, err := cs.GetSignedMessage(ctx, msgCid)
|
||||||
|
if err != nil {
|
||||||
|
// We couldn't find the signed message, it might be a BLS message, so search for a regular message.
|
||||||
|
msg, err := cs.GetMessage(ctx, msgCid)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("failed to find msg %s: %w", msgCid, err)
|
||||||
|
}
|
||||||
|
smsg = &types.SignedMessage{
|
||||||
|
Message: *msg,
|
||||||
|
Signature: crypto.Signature{
|
||||||
|
Type: crypto.SigTypeBLS,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return smsg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEthTxFromMessageLookup creates an ethereum transaction from filecoin message lookup. If a negative txIdx is passed
|
||||||
|
// into the function, it looks up the transaction index of the message in the tipset, otherwise it uses the txIdx passed into the
|
||||||
|
// function
|
||||||
|
func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, txIdx int, cs *store.ChainStore, sa StateAPI) (ethtypes.EthTx, error) {
|
||||||
|
ts, err := cs.LoadTipSet(ctx, msgLookup.TipSet)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// This tx is located in the parent tipset
|
||||||
|
parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
parentTsCid, err := parentTs.Key().Cid()
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookup the transactionIndex
|
||||||
|
if txIdx < 0 {
|
||||||
|
msgs, err := cs.MessagesForTipset(ctx, parentTs)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, err
|
||||||
|
}
|
||||||
|
for i, msg := range msgs {
|
||||||
|
if msg.Cid() == msgLookup.Message {
|
||||||
|
txIdx = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if txIdx < 0 {
|
||||||
|
return ethtypes.EthTx{}, fmt.Errorf("cannot find the msg in the tipset")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blkHash, err := ethtypes.EthHashFromCid(parentTsCid)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
smsg, err := getSignedMessage(ctx, cs, msgLookup.Message)
|
||||||
|
if err != nil {
|
||||||
|
return ethtypes.EthTx{}, xerrors.Errorf("failed to get signed msg: %w", err)
|
	}

	tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
	if err != nil {
		return ethtypes.EthTx{}, err
	}

	var (
		bn = ethtypes.EthUint64(parentTs.Height())
		ti = ethtypes.EthUint64(txIdx)
	)

	tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
	tx.BlockHash = &blkHash
	tx.BlockNumber = &bn
	tx.TransactionIndex = &ti
	return tx, nil
}

func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, events []types.Event, cs *store.ChainStore, sa StateAPI) (api.EthTxReceipt, error) {
	var (
		transactionIndex ethtypes.EthUint64
		blockHash        ethtypes.EthHash
		blockNumber      ethtypes.EthUint64
	)

	if tx.TransactionIndex != nil {
		transactionIndex = *tx.TransactionIndex
	}
	if tx.BlockHash != nil {
		blockHash = *tx.BlockHash
	}
	if tx.BlockNumber != nil {
		blockNumber = *tx.BlockNumber
	}

	receipt := api.EthTxReceipt{
		TransactionHash:  tx.Hash,
		From:             tx.From,
		To:               tx.To,
		TransactionIndex: transactionIndex,
		BlockHash:        blockHash,
		BlockNumber:      blockNumber,
		Type:             ethtypes.EthUint64(2),
		Logs:             []ethtypes.EthLog{}, // empty log array is compulsory when no logs, or libraries like ethers.js break
		LogsBloom:        ethtypes.EmptyEthBloom[:],
	}

	if lookup.Receipt.ExitCode.IsSuccess() {
		receipt.Status = 1
	} else {
		receipt.Status = 0
	}

	receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)

	// TODO: handle CumulativeGasUsed
	receipt.CumulativeGasUsed = ethtypes.EmptyEthInt

	// TODO: avoid loading the tipset twice (once here, once when we convert the message to a txn)
	ts, err := cs.GetTipSetFromKey(ctx, lookup.TipSet)
	if err != nil {
		return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
	}

	// The tx is located in the parent tipset
	parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
	if err != nil {
		return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err)
	}

	baseFee := parentTs.Blocks()[0].ParentBaseFee
	gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
	totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)

	effectiveGasPrice := big.Zero()
	if lookup.Receipt.GasUsed > 0 {
		effectiveGasPrice = big.Div(totalSpent, big.NewInt(lookup.Receipt.GasUsed))
	}
	receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)

	if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
		// Create and Create2 return the same things.
		var ret eam.CreateExternalReturn
		if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
			return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
		}
		addr := ethtypes.EthAddress(ret.EthAddress)
		receipt.ContractAddress = &addr
	}

	if len(events) > 0 {
		receipt.Logs = make([]ethtypes.EthLog, 0, len(events))
		for i, evt := range events {
			l := ethtypes.EthLog{
				Removed:          false,
				LogIndex:         ethtypes.EthUint64(i),
				TransactionHash:  tx.Hash,
				TransactionIndex: transactionIndex,
				BlockHash:        blockHash,
				BlockNumber:      blockNumber,
			}

			data, topics, ok := ethLogFromEvent(evt.Entries)
			if !ok {
				// not an eth event.
				continue
			}
			for _, topic := range topics {
				ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
			}
			l.Data = data
			l.Topics = topics

			addr, err := address.NewIDAddress(uint64(evt.Emitter))
			if err != nil {
				return api.EthTxReceipt{}, xerrors.Errorf("failed to create ID address: %w", err)
			}

			l.Address, err = lookupEthAddress(ctx, addr, sa)
			if err != nil {
				return api.EthTxReceipt{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
			}

			ethtypes.EthBloomSet(receipt.LogsBloom, l.Address[:])
			receipt.Logs = append(receipt.Logs, l)
		}
	}

	return receipt, nil
}
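A note on the EffectiveGasPrice computed above: rather than following from the fee caps the sender signed, it is back-computed from what execution actually charged — base-fee burn plus miner tip plus over-estimation burn, divided by gas used. A minimal, self-contained sketch of that arithmetic with made-up attoFIL values (none of these numbers come from the diff):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical charge components for one message, in attoFIL.
	baseFeeBurn := big.NewInt(1_000_000)
	minerTip := big.NewInt(200_000)
	overEstimationBurn := big.NewInt(50_000)
	gasUsed := big.NewInt(10_000)

	// totalSpent mirrors big.Sum(BaseFeeBurn, MinerTip, OverEstimationBurn).
	totalSpent := new(big.Int).Add(baseFeeBurn, minerTip)
	totalSpent.Add(totalSpent, overEstimationBurn)

	// Divide by gas used, guarding against zero-gas messages.
	effectiveGasPrice := new(big.Int)
	if gasUsed.Sign() > 0 {
		effectiveGasPrice.Div(totalSpent, gasUsed)
	}
	fmt.Println(effectiveGasPrice) // prints 125 (attoFIL per unit of gas)
}
```

The gasUsed > 0 guard mirrors the code above and avoids a divide-by-zero for messages that consumed no gas.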
@ -1887,6 +1887,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam
 		ConsensusMinerMinPower:  build.ConsensusMinerMinPower,
 		SupportedProofTypes:     build.SupportedProofTypes,
 		PreCommitChallengeDelay: build.PreCommitChallengeDelay,
+		Eip155ChainID:           build.Eip155ChainId,
 		ForkUpgradeParams: api.ForkUpgradeParams{
 			UpgradeSmokeHeight:  build.UpgradeSmokeHeight,
 			UpgradeBreezeHeight: build.UpgradeBreezeHeight,
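Exposing Eip155ChainID in NetworkParams lets tooling discover the chain ID (314 on Filecoin mainnet) instead of hardcoding it. A hedged sketch of reading it over JSON-RPC; the localhost endpoint, port, and the Filecoin.StateGetNetworkParams method name follow lotus's usual conventions but are assumptions here, not something this diff establishes:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed default lotus API endpoint; adjust host/port/token for your node.
	body := []byte(`{"jsonrpc":"2.0","method":"Filecoin.StateGetNetworkParams","params":[],"id":1}`)
	resp, err := http.Post("http://127.0.0.1:1234/rpc/v1", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	// On mainnet the response should now include "Eip155ChainID": 314.
	fmt.Println(string(out))
}
```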
node/impl/full/txhashmanager.go (new file, 129 lines)
@ -0,0 +1,129 @@
package full

import (
	"context"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/ethhashlookup"
	"github.com/filecoin-project/lotus/chain/types"
)

type EthTxHashManager struct {
	StateAPI              StateAPI
	TransactionHashLookup *ethhashlookup.EthTxHashLookup
}

func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error {
	return nil
}

func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error {
	if minHeight < build.UpgradeHyggeHeight {
		minHeight = build.UpgradeHyggeHeight
	}

	ts := m.StateAPI.Chain.GetHeaviestTipSet()
	for ts.Height() > minHeight {
		for _, block := range ts.Blocks() {
			msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block)
			if err != nil {
				// If we can't find the messages, we've either imported from snapshot or pruned the store
				log.Debug("exiting message mapping population at epoch ", ts.Height())
				return nil
			}

			for _, msg := range msgs {
				m.ProcessSignedMessage(ctx, msg)
			}
		}

		var err error
		ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents())
		if err != nil {
			return err
		}
	}

	return nil
}

func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error {
	for _, blk := range to.Blocks() {
		_, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk)
		if err != nil {
			return err
		}

		for _, smsg := range smsgs {
			if smsg.Signature.Type != crypto.SigTypeDelegated {
				continue
			}

			hash, err := ethTxHashFromSignedMessage(ctx, smsg, m.StateAPI)
			if err != nil {
				return err
			}

			err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid())
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {
	if msg.Signature.Type != crypto.SigTypeDelegated {
		return
	}

	ethTx, err := newEthTxFromSignedMessage(ctx, msg, m.StateAPI)
	if err != nil {
		log.Errorf("error converting filecoin message to eth tx: %s", err)
		return
	}

	err = m.TransactionHashLookup.UpsertHash(ethTx.Hash, msg.Cid())
	if err != nil {
		log.Errorf("error inserting tx mapping to db: %s", err)
		return
	}
}

func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) {
	for {
		select {
		case <-ctx.Done():
			return
		case u := <-ch:
			if u.Type != api.MpoolAdd {
				continue
			}

			manager.ProcessSignedMessage(ctx, u.Message)
		}
	}
}

func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) {
	if retentionDays == 0 {
		return
	}

	gcPeriod := 1 * time.Hour
	for {
		entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays)
		if err != nil {
			log.Errorf("error garbage collecting eth transaction hash database: %s", err)
		}
		log.Infof("garbage collection run on eth transaction hash lookup database. %d entries deleted", entriesDeleted)

		time.Sleep(gcPeriod)
	}
}
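One thing to note in EthTxHashGC above: the loop sleeps unconditionally and never checks its context, so it only exits when the process does (the format call was also corrected to log.Infof, since a format string with arguments was being passed to log.Info). As a compact illustration of the same hourly-sweep design, here is a self-contained, context-aware variant; deleteOlderThan is a stub standing in for TransactionHashLookup.DeleteEntriesOlderThan:

```go
package main

import (
	"context"
	"log"
	"time"
)

// deleteOlderThan is a stub so the sketch compiles on its own; in lotus the
// real work is done by TransactionHashLookup.DeleteEntriesOlderThan.
func deleteOlderThan(days int) (int, error) { return 0, nil }

// gcLoop mirrors the shape of EthTxHashGC, but uses a ticker and honours
// ctx cancellation instead of an unconditional time.Sleep.
func gcLoop(ctx context.Context, retentionDays int) {
	if retentionDays == 0 {
		return // retention disabled: keep tx hash mappings forever
	}
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		n, err := deleteOlderThan(retentionDays)
		if err != nil {
			log.Printf("eth tx hash gc: %s", err)
		} else {
			log.Printf("eth tx hash gc: deleted %d entries", n)
		}
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	gcLoop(ctx, 30) // e.g. keep 30 days of tx hash mappings
}
```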
@ -278,7 +278,16 @@ func (sm *StorageMinerAPI) SectorUnseal(ctx context.Context, sectorNum abi.Secto
 		ProofType: status.SealProof,
 	}

-	return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+	bgCtx := context.Background()
+
+	go func() {
+		err := sm.StorageMgr.SectorsUnsealPiece(bgCtx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+		if err != nil {
+			log.Errorf("unseal for sector %d failed: %+v", sectorNum, err)
+		}
+	}()
+
+	return nil
 }

 // List all staged sectors
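The effect of this change is that SectorUnseal now returns immediately while the unseal proceeds in the background, detached (via context.Background()) from the RPC request context: cancelling or timing out the API call no longer aborts a long-running unseal. The trade-off is that callers can no longer observe success or failure from the return value; errors surface only in the miner log.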
@ -559,6 +559,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
 	}

 	case pubsub_pb.TraceEvent_PRUNE:
+		stats.Record(context.TODO(), metrics.PubsubPruneMessage.M(1))
 		if trw.traceMessage(evt.GetPrune().GetTopic()) {
 			if trw.lp2pTracer != nil {
 				trw.lp2pTracer.Trace(evt)
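Note that the new counter is recorded before the per-topic traceMessage filter, so PRUNE events are counted for every topic, including those whose messages are not being traced.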
@ -11,8 +11,8 @@ import (
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/event"
 	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
 	"go.uber.org/fx"
 	"golang.org/x/xerrors"
@ -66,18 +66,22 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.
 	ctx := helpers.LifecycleCtx(mctx, lc)

 	go func() {
+		// We want to get information on connected peers, we don't want to trigger new connections.
+		ctx := network.WithNoDial(ctx, "filecoin hello")
 		for evt := range sub.Out() {
 			pic := evt.(event.EvtPeerIdentificationCompleted)
+			// We just finished identifying the peer, that means we should know what
+			// protocols it speaks. Check if it speaks the Filecoin hello protocol
+			// before continuing.
+			if p, _ := h.Peerstore().FirstSupportedProtocol(pic.Peer, hello.ProtocolID); p != hello.ProtocolID {
+				continue
+			}
 			go func() {
 				if err := svc.SayHello(ctx, pic.Peer); err != nil {
 					protos, _ := h.Peerstore().GetProtocols(pic.Peer)
 					agent, _ := h.Peerstore().Get(pic.Peer, "AgentVersion")
-					if protosContains(protos, hello.ProtocolID) {
-						log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
-					} else {
-						log.Debugw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
-					}
+					log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
 					return
 				}
 			}()
 		}
@ -85,15 +89,6 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.
 	return nil
 }

-func protosContains(protos []protocol.ID, search protocol.ID) bool {
-	for _, p := range protos {
-		if p == search {
-			return true
-		}
-	}
-	return false
-}
-
 func RunPeerMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, pmgr *peermgr.PeerMgr) {
 	go pmgr.Run(helpers.LifecycleCtx(mctx, lc))
 }
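Taken together, the two RunHello hunks move the protocol check from after the failure to before the attempt: the peerstore is consulted up front (FirstSupportedProtocol), so peers that don't speak the hello protocol are skipped entirely, the Warnw/Debugw split and the protosContains helper become unnecessary, and network.WithNoDial guarantees that saying hello only ever uses existing connections rather than triggering new dials.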
@ -265,13 +260,9 @@ func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconPara
 		return nil, err
 	}

-	shd := beacon.Schedule{}
-	for _, dc := range p.DrandConfig {
-		bc, err := drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, dc.Config)
-		if err != nil {
-			return nil, xerrors.Errorf("creating drand beacon: %w", err)
-		}
-		shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
-	}
+	shd, err := drand.BeaconScheduleFromDrandSchedule(p.DrandConfig, gen.Timestamp, p.PubSub)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to create beacon schedule: %w", err)
+	}

 	return shd, nil
scripts/snapshot-summary.py (new file, 30 lines)
@ -0,0 +1,30 @@
import plotly.express as px
import sys, json
import pathlib

snapshot_data = json.load(sys.stdin)

# Possible extensions:
# 1. parameterize to use block count as value instead of byte size
# 2. parameterize on different types of px chart types
# 3. parameterize on output port so we can serve this from infra

parents = []
names = []
values = []

for key in snapshot_data:
    path = pathlib.Path(key)
    name = key
    parent = str(path.parent)
    if key == '/':
        parent = ''
    stats = snapshot_data[key]
    parents.append(parent)
    names.append(name)
    values.append(stats['Size'])

data = dict(names=names, parents=parents, values=values)
fig = px.treemap(data, names='names', parents='parents', values='values')
fig.show()
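Usage, as implied by the script itself: it expects on stdin a JSON object mapping paths to objects with a Size field, so something like `python3 scripts/snapshot-summary.py < snapshot-stats.json` renders the snapshot layout as an interactive plotly treemap in the browser (where the stats file is assumed to come from a separate snapshot-analysis step, not from this diff).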
@ -91,7 +91,7 @@ func FetchWithTemp(ctx context.Context, urls []string, dest string, header http.
 			continue
 		}

-		if err := move(tempDest, dest); err != nil {
+		if err := Move(tempDest, dest); err != nil {
 			return "", xerrors.Errorf("fetch move error %s -> %s: %w", tempDest, dest, err)
 		}

@ -366,7 +366,7 @@ loop:
 			if !sid.primary && primary {
 				sid.primary = true
 			} else {
-				log.Warnf("sector %v redeclared in %s", s, storageID)
+				log.Debugf("sector %v redeclared in %s", s, storageID)
 			}
 			continue loop
 		}
@ -548,7 +548,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, exi
 	}

 	if best == "" {
-		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector")
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("couldn't find a suitable path for a sector"))
 	}

 	storiface.SetPathByType(&out, fileType, best)
|
|||||||
return xerrors.Errorf("dropping source sector from index: %w", err)
|
return xerrors.Errorf("dropping source sector from index: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
|
if err := Move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
|
||||||
// TODO: attempt some recovery (check if src is still there, re-declare)
|
// TODO: attempt some recovery (check if src is still there, re-declare)
|
||||||
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
|
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
|
||||||
}
|
}
|
||||||
|
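The only change in this hunk and the FetchWithTemp one above is the rename of the unexported move helper to the exported Move; behaviour is unchanged, and the export suggests the helper is now needed by code outside this package.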
Some files were not shown because too many files have changed in this diff.