diff --git a/.circleci/config.yml b/.circleci/config.yml
index d4104686c..e91c41129 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -997,6 +997,7 @@ workflows:
suite: utest-unit-cli
target: "./cli/... ./cmd/... ./api/..."
get-params: true
+ executor: golang-2xl
- test:
name: test-unit-node
requires:
@@ -1004,12 +1005,14 @@ workflows:
suite: utest-unit-node
target: "./node/..."
+
- test:
name: test-unit-rest
requires:
- build
suite: utest-unit-rest
target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
+
executor: golang-2xl
- test:
name: test-unit-storage
@@ -1018,6 +1021,7 @@ workflows:
suite: utest-unit-storage
target: "./storage/... ./extern/..."
+
- test:
go-test-flags: "-run=TestMulticoreSDR"
requires:
diff --git a/.circleci/template.yml b/.circleci/template.yml
index de2cf47cc..71616f05f 100644
--- a/.circleci/template.yml
+++ b/.circleci/template.yml
@@ -558,6 +558,7 @@ workflows:
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[if eq $suite "unit-cli"]]get-params: true[[end]]
+ [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
[[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
[[- end]]
- test:
diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md
new file mode 100644
index 000000000..205c82770
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/task.md
@@ -0,0 +1,31 @@
+---
+name: New Task
+about: A larger yet well-scoped task
+title: ''
+labels: Needs Triage
+assignees: ''
+
+---
+
+## User Story
+
+
+## Acceptance Criteria
+
+
+
+```[tasklist]
+### Deliverables
+
+```
+
+## Technical Breakdown
+```[tasklist]
+### Development
+
+```
+
+```[tasklist]
+### Testing
+
+```
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index b6ef5fa3c..b5843c5b3 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -16,6 +16,7 @@ Before you mark the PR ready for review, please make sure that:
- example: ` fix: mempool: Introduce a cache for valid signatures`
- `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
- `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
+- [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section.
- [ ] New features have usage guidelines and / or documentation updates in
- [ ] [Lotus Documentation](https://lotus.filecoin.io)
- [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b07cb1f7b..dd2888747 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,128 @@
# Lotus changelog
+# v1.25.0 / 2023-11-22
+
+This is a highly recommended feature release of Lotus. This optional release supports the Filecoin network version 21 upgrade, codenamed Watermelon 🍉, and ships numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.
+
+**The Filecoin network upgrade v21, codenamed Watermelon 🍉, is at epoch 3469380 - 2023-12-12T13:30:00Z**
+
+The full list of [protocol improvements delivered in the network upgrade can be found here](https://github.com/filecoin-project/core-devs/blob/master/Network%20Upgrades/v21.md).
+
+## ☢️ Upgrade Warnings ☢️
+
+- Read through the [changelog of the mandatory v1.24.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.24.0), especially the `Migration` and `v12 Builtin Actor Bundle` sections.
+- Please remove your existing Lotus repo and clone a fresh one (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release.
+- Building Lotus from this feature release requires Go v1.20.7 or higher. Go version 1.21.x is not supported yet.
+- Eth RPC providers, please check out the [new tracing API added to Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100); a minimal usage sketch follows this list.
+
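+For Eth RPC providers, below is a minimal sketch (in Go) of calling the new tracing endpoints over JSON-RPC. The node address, token handling, and the `"latest"` block selector are illustrative assumptions, not part of the release itself:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/filecoin-project/lotus/api/client"
+)
+
+func main() {
+	ctx := context.Background()
+	headers := http.Header{"Authorization": []string{"Bearer " + os.Getenv("LOTUS_TOKEN")}}
+
+	// Connect to a full node's v1 API, which carries EthTraceBlock and
+	// EthTraceReplayBlockTransactions (aliased to trace_block and
+	// trace_replayBlockTransactions for Eth tooling).
+	full, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", headers)
+	if err != nil {
+		panic(err)
+	}
+	defer closer()
+
+	// Traces created at the latest block.
+	traces, err := full.EthTraceBlock(ctx, "latest")
+	if err != nil {
+		panic(err)
+	}
+	for _, trace := range traces {
+		fmt.Printf("%+v\n", trace)
+	}
+}
+```
+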
+## ⭐️ Highlights ⭐️
+
+**Unsealing bugfixes and enhancements**
+
+This feature release introduces significant improvements and bugfixes around unsealing, and ensures that unsealing behaves as one would expect. Unsealing of all sector types (deal sectors, snap-sectors without sector keys, and snap-sectors with sector keys) now works seamlessly.
+
+Some additional unsealing improvements are:
+- Unsealing on workers that only have sealing paths now works. :tada:
+- Unsealed files are transferred to long-term storage upon successful unsealing. :arrow_right:
+- No residual files are left in sealing paths after a successful unsealing operation. :broom:
+
+**SupraSeal C2**
+
+Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimizations in your sealing pipeline. These optimizations are currently behind the `FFI_USE_CUDA_SUPRASEAL` feature flag. We advise users to test this feature on a test network before using it on mainnet. To try it out, export the `FFI_USE_CUDA_SUPRASEAL=1` environment variable and build your lotus-workers from source. For questions about the SupraSeal C2 sealing optimizations, reach out in the #fil-proofs or #dsa-sealing Slack channels.
+
+## New features
+- feat: add Eip155ChainID to StateGetNetworkParams ([filecoin-project/lotus#10987](https://github.com/filecoin-project/lotus/pull/10987))
+- feat: profiling: state summary and visualization ([filecoin-project/lotus#11012](https://github.com/filecoin-project/lotus/pull/11012))
+- feat: snapshot: remove existing chain ([filecoin-project/lotus#11032](https://github.com/filecoin-project/lotus/pull/11032))
+- feat: Add a metric to display pruning of the node's peer ([filecoin-project/lotus#11058](https://github.com/filecoin-project/lotus/pull/11058))
+- feat: shed: gather partition metadata ([filecoin-project/lotus#11078](https://github.com/filecoin-project/lotus/pull/11078))
+- feat: vm: allow raw "cbor" in state and use the new go-multicodec ([filecoin-project/lotus#11081](https://github.com/filecoin-project/lotus/pull/11081))
+- Add new lotus-shed command for backfilling actor events ([filecoin-project/lotus#11088](https://github.com/filecoin-project/lotus/pull/11088))
+- feat: Add new tracing API ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100))
+- feat: FVM: do not error on unsuccessful implicit messages ([filecoin-project/lotus#11120](https://github.com/filecoin-project/lotus/pull/11120))
+- feat: chain node: Move consensus slasher to internal service ([filecoin-project/lotus#11126](https://github.com/filecoin-project/lotus/pull/11126))
+- feat: miner: implement FRC-0051 ([filecoin-project/lotus#11157](https://github.com/filecoin-project/lotus/pull/11157))
+ - feat: chainstore: FRC-0051: Remove all equivocated blocks from tipsets ([filecoin-project/lotus#11104](https://github.com/filecoin-project/lotus/pull/11104))
+ - feat: miner: 2 minor refactors ([filecoin-project/lotus#11158](https://github.com/filecoin-project/lotus/pull/11158))
+- feat: refactor: return randomness base to FVM without hashing ([filecoin-project/lotus#11167](https://github.com/filecoin-project/lotus/pull/11167))
+- feat: Lotus Gateway: add allocation and claim related GET APIs to gateway ([filecoin-project/lotus#11183](https://github.com/filecoin-project/lotus/pull/11183))
+- feat: shed: Add exec traces to `lotus-shed msg` ([filecoin-project/lotus#11188](https://github.com/filecoin-project/lotus/pull/11188))
+- feat: miner: defensive check for equivocation ([filecoin-project/lotus#11328](https://github.com/filecoin-project/lotus/pull/11328))
+
+## Improvements
+- feat: daemon: improvements to the consensus slasher ([filecoin-project/lotus#10979](https://github.com/filecoin-project/lotus/pull/10979))
+- fix: Snapdeals unsealing fixes ([filecoin-project/lotus#11011](https://github.com/filecoin-project/lotus/pull/11011))
+- refactor: Make all validation error actions explicit ([filecoin-project/lotus#11016](https://github.com/filecoin-project/lotus/pull/11016))
+- feat: shed: command for decoding block headers ([filecoin-project/lotus#11031](https://github.com/filecoin-project/lotus/pull/11031))
+- fix: stores: Tune down `StorageDeclareSector` log-lvl ([filecoin-project/lotus#11045](https://github.com/filecoin-project/lotus/pull/11045))
+- feat: types: apply a max length when decoding events ([filecoin-project/lotus#11054](https://github.com/filecoin-project/lotus/pull/11054))
+- feat: slasher: improve UX ([filecoin-project/lotus#11060](https://github.com/filecoin-project/lotus/pull/11060))
+- feat: daemon: improvements to the consensus slasher ([filecoin-project/lotus#11063](https://github.com/filecoin-project/lotus/pull/11063))
+- fix: events: Improve performance of event migration from V1 to V2 ([filecoin-project/lotus#11064](https://github.com/filecoin-project/lotus/pull/11064))
+- feat: lotus-bench: AMT benchmarking ([filecoin-project/lotus#11075](https://github.com/filecoin-project/lotus/pull/11075))
+- fix: DecodeRLP can panic ([filecoin-project/lotus#11079](https://github.com/filecoin-project/lotus/pull/11079))
+- fix: daemon: set real beacon schedule when importing chain ([filecoin-project/lotus#11080](https://github.com/filecoin-project/lotus/pull/11080))
+- fix: ethtypes: handle length overflow case ([filecoin-project/lotus#11082](https://github.com/filecoin-project/lotus/pull/11082))
+- chore: stmgr: migrations: do not log noisily on cache misses ([filecoin-project/lotus#11083](https://github.com/filecoin-project/lotus/pull/11083))
+- feat: daemon: import: only setup stmgr if validating chain ([filecoin-project/lotus#11084](https://github.com/filecoin-project/lotus/pull/11084))
+- fix: sealing pipeline: Fix PC1 retry loop ([filecoin-project/lotus#11087](https://github.com/filecoin-project/lotus/pull/11087))
+- chore: legacy syscalls: Cleanup ComputeUnsealedSectorCID ([filecoin-project/lotus#11119](https://github.com/filecoin-project/lotus/pull/11119))
+- sector import: fix evaluating randomness when importing a sector ([filecoin-project/lotus#11123](https://github.com/filecoin-project/lotus/pull/11123))
+- fix: cli: Only display `warning` if behind sync ([filecoin-project/lotus#11140](https://github.com/filecoin-project/lotus/pull/11140))
+- fix: worker: Support IPv6 formatted API-keys ([filecoin-project/lotus#11141](https://github.com/filecoin-project/lotus/pull/11141))
+- fix: sealing: Switch to calling PreCommitSectorBatch2 ([filecoin-project/lotus#11142](https://github.com/filecoin-project/lotus/pull/11142))
+- fix: downgrade harmless warning to debug ([filecoin-project/lotus#11145](https://github.com/filecoin-project/lotus/pull/11145))
+- fix: sealing: Fix RetryCommitWait loop when sector cron activation fails ([filecoin-project/lotus#11046](https://github.com/filecoin-project/lotus/pull/11046))
+- fix: gateway: return an error when an Eth filter is not found ([filecoin-project/lotus#11152](https://github.com/filecoin-project/lotus/pull/11152))
+- fix: chainstore: do not get stuck in unhappy equivocation cases ([filecoin-project/lotus#11159](https://github.com/filecoin-project/lotus/pull/11159))
+- fix: sealing: Run unsealing in the background for better ux ([filecoin-project/lotus#11177](https://github.com/filecoin-project/lotus/pull/11177))
+- fix: build: Allow lotus-wallet to be built independently ([filecoin-project/lotus#11187](https://github.com/filecoin-project/lotus/pull/11187))
+- fix: wallet: Make import handle SIGINT/SIGTERM ([filecoin-project/lotus#11190](https://github.com/filecoin-project/lotus/pull/11190))
+- fix: markets/dagstore: remove trace goroutine for dagstore wrapper ([filecoin-project/lotus#11191](https://github.com/filecoin-project/lotus/pull/11191))
+- fix: chain: Do not update message info cache until after message validation ([filecoin-project/lotus#11202](https://github.com/filecoin-project/lotus/pull/11202))
+- fix: chain: cancel long operations upon ctx cancelation ([filecoin-project/lotus#11206](https://github.com/filecoin-project/lotus/pull/11206))
+- fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214))
+- fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224))
+- chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350))
+
+## Dependencies
+- deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998))
+- deps: update go-libp2p to v0.29.2 ([filecoin-project/lotus#11164](https://github.com/filecoin-project/lotus/pull/11164))
+- deps: update go-libp2p to v0.30.0 ([filecoin-project/lotus#11189](https://github.com/filecoin-project/lotus/pull/11189))
+- fix: build: use tagged releases ([filecoin-project/lotus#11194](https://github.com/filecoin-project/lotus/pull/11194))
+- chore: test-vectors: update ([filecoin-project/lotus#11196](https://github.com/filecoin-project/lotus/pull/11196))
+- chore: backport #11365 to release/v1.25.0 ([filecoin-project/lotus#11369](https://github.com/filecoin-project/lotus/pull/11369))
+- chore: deps: update to go-state-types v0.12.8 ([filecoin-project/lotus#11339](https://github.com/filecoin-project/lotus/pull/11437))
+- chore: deps: update to final actors ([filecoin-project/lotus#11330](https://github.com/filecoin-project/lotus/pull/11440))
+- github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0)
+- github.com/filecoin-project/test-vectors/schema (v0.0.5 -> v0.0.7)
+
+## Others
+- chore: Extract stable release and post release portion outside of RC testing in template ([filecoin-project/lotus#11000](https://github.com/filecoin-project/lotus/pull/11000))
+- fix: docs: include FFI steps in lotus RELEASE_ISSUE_TEMPLATE ([filecoin-project/lotus#11047](https://github.com/filecoin-project/lotus/pull/11047))
+- chore: build: update to v1.23.4-dev ([filecoin-project/lotus#11049](https://github.com/filecoin-project/lotus/pull/11049))
+- fix: deflake: Use MockProofs ([filecoin-project/lotus#11059](https://github.com/filecoin-project/lotus/pull/11059))
+- fix: failing test: Tweak TestWindowPostV1P1NV20 test condition ([filecoin-project/lotus#11121](https://github.com/filecoin-project/lotus/pull/11121))
+- fix: CI: make test-unit-rest actually be the rest of the tests ([filecoin-project/lotus#11147](https://github.com/filecoin-project/lotus/pull/11147))
+- chore: merge releases into master ([filecoin-project/lotus#11154](https://github.com/filecoin-project/lotus/pull/11154))
+- tests: deflake: TestGetBlockByNumber ([filecoin-project/lotus#11155](https://github.com/filecoin-project/lotus/pull/11155))
+- tests: mac seal test ([filecoin-project/lotus#11180](https://github.com/filecoin-project/lotus/pull/11180))
+- tests: Take Download out of Sealer time ([filecoin-project/lotus#11182](https://github.com/filecoin-project/lotus/pull/11182))
+- feat: test: Test that verified clients can directly transfer datacap, creating allocations ([filecoin-project/lotus#11169](https://github.com/filecoin-project/lotus/pull/11169))
+- chore: merge feat/nv21 into master ([filecoin-project/lotus#11201](https://github.com/filecoin-project/lotus/pull/11201))
+- ci: Use larger executor for cli tests ([filecoin-project/lotus#11212](https://github.com/filecoin-project/lotus/pull/11212))
+- fix: dockerfile: Bump to Go 1.20.7 image ([filecoin-project/lotus#11221](https://github.com/filecoin-project/lotus/pull/11221))
+- docs: Update PR template to callout remembering to update CHANGELOG ([filecoin-project/lotus#11232](https://github.com/filecoin-project/lotus/pull/11232))
+- chore: release: 1.23.4rc1 prep ([filecoin-project/lotus#11248](https://github.com/filecoin-project/lotus/pull/11248))
+- chore: backport #11262 (#11265) ([filecoin-project/lotus#11265](https://github.com/filecoin-project/lotus/pull/11265))
+- chore: backport #11294 into `release/v1.23.4` ([filecoin-project/lotus#11295](https://github.com/filecoin-project/lotus/pull/11295))
+- chore: release: V1.25 rebase ([filecoin-project/lotus#11342](https://github.com/filecoin-project/lotus/pull/11342))
+- backport: tests: add SynthPorep layers to cachefiles ([filecoin-project/lotus#11344](https://github.com/filecoin-project/lotus/pull/11344))
+- chore: backport #11408 to release/v1.25.0 ([filecoin-project/lotus#11414](https://github.com/filecoin-project/lotus/pull/11414))
+- chore: backport calibnet lightweight patch ([filecoin-project/lotus#11422](https://github.com/filecoin-project/lotus/pull/11422))
+- chore: update bootstrap nodes ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288))
+- chore: add bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175))
+
# 1.24.0 / 2023-11-22
This is the stable release for the upcoming **MANDATORY** Filecoin network upgrade v21, codenamed Watermelon 🍉, at **epoch 3469380 - 2023-12-12T13:30:00Z**.
@@ -96,6 +219,7 @@ There is a new protocol limit on how many partition could be submited in one PoS
The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at Chainsafe has launched a brand new lightweight snapshot service that is backed up by forest nodes! This is a great alternative service along with the fil-infra one, and it is compatible with lotus! We recommend lotus users to check it out [here](https://docs.filecoin.io/networks/mainnet#resources)!
+
# v1.23.3 / 2023-08-01
This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
diff --git a/Makefile b/Makefile
index 429e93f53..b94c13c0d 100644
--- a/Makefile
+++ b/Makefile
@@ -193,7 +193,7 @@ lotus-health:
.PHONY: lotus-health
BINS+=lotus-health
-lotus-wallet:
+lotus-wallet: $(BUILD_DEPS)
rm -f lotus-wallet
$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
.PHONY: lotus-wallet
diff --git a/api/api_full.go b/api/api_full.go
index e5f88936a..f919bc13b 100644
--- a/api/api_full.go
+++ b/api/api_full.go
@@ -867,6 +867,13 @@ type FullNode interface {
// Returns the client version
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
+ // TraceAPI related methods
+ //
+ // Returns traces created at given block
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
+ // Replays all transactions in a block returning the requested traces for each transaction
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read
+
// CreateBackup creates node backup onder the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
diff --git a/api/api_gateway.go b/api/api_gateway.go
index 3f7bc21cc..27e725457 100644
--- a/api/api_gateway.go
+++ b/api/api_gateway.go
@@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
apitypes "github.com/filecoin-project/lotus/api/types"
@@ -65,6 +66,11 @@ type Gateway interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
@@ -121,4 +127,6 @@ type Gateway interface {
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
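These verified-registry lookups are now reachable through the gateway. Below is a hedged sketch of querying a provider's claims via the gateway API; the gateway endpoint and the `f01234` provider ID are illustrative placeholders, while the calls themselves are the ones added above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	ctx := context.Background()

	// Gateway read-only calls need no auth token.
	gw, closer, err := client.NewGatewayRPCV1(ctx, "wss://api.chain.love/rpc/v1", nil)
	if err != nil {
		panic(err)
	}
	defer closer()

	provider, err := address.NewFromString("f01234") // hypothetical provider actor
	if err != nil {
		panic(err)
	}

	// All verified-registry claims held by the provider at the current head.
	claims, err := gw.StateGetClaims(ctx, provider, types.EmptyTSK)
	if err != nil {
		panic(err)
	}
	for id, claim := range claims {
		fmt.Printf("claim %d: client %d, size %d\n", id, claim.Client, claim.Size)
	}
}
```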
diff --git a/api/eth_aliases.go b/api/eth_aliases.go
index ca0f861ac..eb0c51005 100644
--- a/api/eth_aliases.go
+++ b/api/eth_aliases.go
@@ -40,6 +40,9 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")
+ as.AliasMethod("trace_block", "Filecoin.EthTraceBlock")
+ as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions")
+
as.AliasMethod("net_version", "Filecoin.NetVersion")
as.AliasMethod("net_listening", "Filecoin.NetListening")
diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go
index d2f2e528e..856d83813 100644
--- a/api/mocks/mock_full.go
+++ b/api/mocks/mock_full.go
@@ -1491,6 +1491,36 @@ func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
}
+// EthTraceBlock mocks base method.
+func (m *MockFullNode) EthTraceBlock(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceBlock, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EthTraceBlock", arg0, arg1)
+ ret0, _ := ret[0].([]*ethtypes.EthTraceBlock)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EthTraceBlock indicates an expected call of EthTraceBlock.
+func (mr *MockFullNodeMockRecorder) EthTraceBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceBlock", reflect.TypeOf((*MockFullNode)(nil).EthTraceBlock), arg0, arg1)
+}
+
+// EthTraceReplayBlockTransactions mocks base method.
+func (m *MockFullNode) EthTraceReplayBlockTransactions(arg0 context.Context, arg1 string, arg2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EthTraceReplayBlockTransactions", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*ethtypes.EthTraceReplayBlockTransaction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EthTraceReplayBlockTransactions indicates an expected call of EthTraceReplayBlockTransactions.
+func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2)
+}
+
// EthUninstallFilter mocks base method.
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
m.ctrl.T.Helper()
diff --git a/api/proxy_gen.go b/api/proxy_gen.go
index 79bf0a738..2d1333495 100644
--- a/api/proxy_gen.go
+++ b/api/proxy_gen.go
@@ -315,6 +315,10 @@ type FullNodeMethods struct {
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
+ EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) `perm:"read"`
+
+ EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `perm:"read"`
+
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
@@ -731,6 +735,10 @@ type GatewayMethods struct {
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
+ EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) ``
+
+ EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) ``
+
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
@@ -769,6 +777,16 @@ type GatewayMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+ StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+ StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+ StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
@@ -2446,6 +2464,28 @@ func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
+func (s *FullNodeStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ if s.Internal.EthTraceBlock == nil {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+ }
+ return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *FullNodeStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *FullNodeStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if s.Internal.EthTraceReplayBlockTransactions == nil {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+ }
+ return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *FullNodeStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@@ -4668,6 +4708,28 @@ func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult,
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
+func (s *GatewayStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ if s.Internal.EthTraceBlock == nil {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+ }
+ return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *GatewayStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *GatewayStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if s.Internal.EthTraceReplayBlockTransactions == nil {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+ }
+ return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *GatewayStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@@ -4877,6 +4939,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
return nil, ErrNotSupported
}
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocation == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocationForPendingDeal == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocations == nil {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+ }
+ return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaim == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaims == nil {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+ }
+ return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
if s.Internal.StateListMiners == nil {
return *new([]address.Address), ErrNotSupported
diff --git a/api/types.go b/api/types.go
index 6e98a4de4..b1ec23f74 100644
--- a/api/types.go
+++ b/api/types.go
@@ -312,6 +312,7 @@ type NetworkParams struct {
SupportedProofTypes []abi.RegisteredSealProof
PreCommitChallengeDelay abi.ChainEpoch
ForkUpgradeParams ForkUpgradeParams
+ Eip155ChainID int
}
type ForkUpgradeParams struct {
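With `Eip155ChainID` exposed on `NetworkParams`, callers can read the chain ID straight from `StateGetNetworkParams`. A small sketch, assuming any connected `api.FullNode` client such as the one from the tracing example earlier:

```go
package netparams

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printChainID reads the new Eip155ChainID field from the node's network params.
func printChainID(ctx context.Context, full api.FullNode) error {
	params, err := full.StateGetNetworkParams(ctx)
	if err != nil {
		return err
	}
	fmt.Println("EIP-155 chain ID:", params.Eip155ChainID)
	return nil
}
```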
diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go
index 60885a1b7..1a7f7d3ac 100644
--- a/api/v0api/gateway.go
+++ b/api/v0api/gateway.go
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
@@ -61,6 +62,11 @@ type Gateway interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go
index f2f3973c1..bd37f6429 100644
--- a/api/v0api/proxy_gen.go
+++ b/api/v0api/proxy_gen.go
@@ -478,6 +478,16 @@ type GatewayMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+ StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+ StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+ StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
@@ -2850,6 +2860,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
return nil, ErrNotSupported
}
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocation == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocationForPendingDeal == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocations == nil {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+ }
+ return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaim == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaims == nil {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+ }
+ return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
if s.Internal.StateGetReceipt == nil {
return nil, ErrNotSupported
diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz
index f66f0d586..b72419e4f 100644
Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ
diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz
index 68b9752e0..90f41df1d 100644
Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ
diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz
index ec6b20dd2..f5d61d7a8 100644
Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ
diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz
index 009fa01fd..b46638397 100644
Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ
diff --git a/build/params_2k.go b/build/params_2k.go
index 901706d97..4826d421d 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -138,6 +138,8 @@ const BlockDelaySecs = uint64(4)
const PropagationDelaySecs = uint64(1)
+var EquivocationDelaySecs = uint64(0)
+
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
// which the miner is slashed
//
diff --git a/build/params_butterfly.go b/build/params_butterfly.go
index 26e716dab..864518df5 100644
--- a/build/params_butterfly.go
+++ b/build/params_butterfly.go
@@ -89,6 +89,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
diff --git a/build/params_calibnet.go b/build/params_calibnet.go
index d242fd677..c22eef2fe 100644
--- a/build/params_calibnet.go
+++ b/build/params_calibnet.go
@@ -129,6 +129,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
var PropagationDelaySecs = uint64(10)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
diff --git a/build/params_interop.go b/build/params_interop.go
index da8029fe6..9fd0d0ff8 100644
--- a/build/params_interop.go
+++ b/build/params_interop.go
@@ -127,6 +127,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
diff --git a/build/params_mainnet.go b/build/params_mainnet.go
index 6c6059693..8176c4e6d 100644
--- a/build/params_mainnet.go
+++ b/build/params_mainnet.go
@@ -112,6 +112,8 @@ var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
var PropagationDelaySecs = uint64(10)
+var EquivocationDelaySecs = uint64(2)
+
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
diff --git a/build/params_testground.go b/build/params_testground.go
index c7ad6381e..05249e7e2 100644
--- a/build/params_testground.go
+++ b/build/params_testground.go
@@ -9,7 +9,6 @@ package build
import (
"math/big"
- "time"
"github.com/ipfs/go-cid"
@@ -34,6 +33,7 @@ var (
MinimumBaseFee = int64(100)
BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
PropagationDelaySecs = uint64(6)
+ EquivocationDelaySecs = uint64(2)
SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -141,7 +141,3 @@ const BootstrapPeerThreshold = 1
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926
-
-// Reducing the delivery delay for equivocation of
-// consistent broadcast to just half a second.
-var CBDeliveryDelay = 500 * time.Millisecond
diff --git a/build/version.go b/build/version.go
index 66bca2b2b..d97b0099a 100644
--- a/build/version.go
+++ b/build/version.go
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
-const BuildVersion = "1.24.0"
+const BuildVersion = "1.25.0"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go
index 9b62a7928..aa5e36717 100644
--- a/chain/beacon/drand/drand.go
+++ b/chain/beacon/drand/drand.go
@@ -235,3 +235,16 @@ func (db *DrandBeacon) maxBeaconRoundV2(latestTs uint64) uint64 {
}
var _ beacon.RandomBeacon = (*DrandBeacon)(nil)
+
+func BeaconScheduleFromDrandSchedule(dcs dtypes.DrandSchedule, genesisTime uint64, ps *pubsub.PubSub) (beacon.Schedule, error) {
+ shd := beacon.Schedule{}
+ for _, dc := range dcs {
+ bc, err := NewDrandBeacon(genesisTime, build.BlockDelaySecs, ps, dc.Config)
+ if err != nil {
+ return nil, xerrors.Errorf("creating drand beacon: %w", err)
+ }
+ shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ }
+
+ return shd, nil
+}
diff --git a/chain/gen/gen.go b/chain/gen/gen.go
index 087f0e00c..9f8d0834d 100644
--- a/chain/gen/gen.go
+++ b/chain/gen/gen.go
@@ -362,7 +362,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
rbase = entries[len(entries)-1]
}
- eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc)
+ eproof, err := IsRoundWinner(ctx, round, m, rbase, mbi, mc)
if err != nil {
return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err)
}
@@ -449,18 +449,19 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
}
func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
+ ctx := context.TODO()
var blks []*types.FullBlock
for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners {
- bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
+ bvals, et, ticket, err := cg.nextBlockProof(ctx, base, m, round)
if err != nil {
return nil, xerrors.Errorf("next block proof: %w", err)
}
if et != nil {
// TODO: maybe think about passing in more real parameters to this?
- wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
+ wpost, err := cg.eppProvs[m].ComputeProof(ctx, nil, nil, round, network.Version0)
if err != nil {
return nil, err
}
@@ -476,8 +477,18 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
}
fts := store.NewFullTipSet(blks)
- if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
- return nil, err
+ if err := cg.cs.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
+ return nil, xerrors.Errorf("failed to persist tipset: %w", err)
+ }
+
+ for _, blk := range blks {
+ if err := cg.cs.AddToTipSetTracker(ctx, blk.Header); err != nil {
+ return nil, xerrors.Errorf("failed to add to tipset tracker: %w", err)
+ }
+ }
+
+ if err := cg.cs.RefreshHeaviestTipSet(ctx, fts.TipSet().Height()); err != nil {
+ return nil, xerrors.Errorf("failed to put tipset: %w", err)
}
cg.CurTipset = fts
@@ -628,7 +639,7 @@ func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInf
return ValidWpostForTesting, nil
}
-func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
+func IsRoundWinner(ctx context.Context, round abi.ChainEpoch,
miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) {
buf := new(bytes.Buffer)
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 50f64f903..6dc3f2239 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -63,6 +63,9 @@ var MaxNonceGap = uint64(4)
const MaxMessageSize = 64 << 10 // 64KiB
+// NOTE: When adding a new error type, please make sure to add the new error type in
+// func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message)
+// in /chain/sub/incoming.go
var (
ErrMessageTooBig = errors.New("message too big")
diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go
index b48f9af43..9486cb936 100644
--- a/chain/stmgr/supply.go
+++ b/chain/stmgr/supply.go
@@ -388,6 +388,14 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
circ := big.Zero()
unCirc := big.Zero()
err := st.ForEach(func(a address.Address, actor *types.Actor) error {
+ // this can be a lengthy operation, we need to cancel early when
+ // the context is cancelled to avoid resource exhaustion
+ select {
+ case <-ctx.Done():
+ // this will cause ForEach to return
+ return ctx.Err()
+ default:
+ }
switch {
case actor.Balance.IsZero():
// Do nothing for zero-balance actors
diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go
index bc2cb5e73..c5dff94a8 100644
--- a/chain/store/checkpoint_test.go
+++ b/chain/store/checkpoint_test.go
@@ -70,7 +70,7 @@ func TestChainCheckpoint(t *testing.T) {
}
// See if the chain will take the fork, it shouldn't.
- err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
require.NoError(t, err)
head = cs.GetHeaviestTipSet()
require.True(t, head.Equals(checkpoint))
@@ -80,7 +80,7 @@ func TestChainCheckpoint(t *testing.T) {
require.NoError(t, err)
// Now switch to the other fork.
- err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
require.NoError(t, err)
head = cs.GetHeaviestTipSet()
require.True(t, head.Equals(last))
diff --git a/chain/store/index_test.go b/chain/store/index_test.go
index 63a1abad0..a3a4ad6ce 100644
--- a/chain/store/index_test.go
+++ b/chain/store/index_test.go
@@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
)
@@ -47,28 +48,29 @@ func TestIndexSeeks(t *testing.T) {
}
cur := mock.TipSet(gen)
- if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
- t.Fatal(err)
- }
+
assert.NoError(t, cs.SetGenesis(ctx, gen))
// Put 113 blocks from genesis
for i := 0; i < 113; i++ {
- nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))
-
- if err := cs.PutTipSet(ctx, nextts); err != nil {
- t.Fatal(err)
- }
+ nextBlk := mock.MkBlock(cur, 1, 1)
+ nextts := mock.TipSet(nextBlk)
+ assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{nextts}))
+ assert.NoError(t, cs.AddToTipSetTracker(ctx, nextBlk))
cur = nextts
}
+ assert.NoError(t, cs.RefreshHeaviestTipSet(ctx, cur.Height()))
+
// Put 50 null epochs + 1 block
skip := mock.MkBlock(cur, 1, 1)
skip.Height += 50
-
skipts := mock.TipSet(skip)
- if err := cs.PutTipSet(ctx, skipts); err != nil {
+ assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{skipts}))
+ assert.NoError(t, cs.AddToTipSetTracker(ctx, skip))
+
+ if err := cs.RefreshHeaviestTipSet(ctx, skip.Height); err != nil {
t.Fatal(err)
}
diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go
index 92bc238a6..5e218fa36 100644
--- a/chain/store/snapshot.go
+++ b/chain/store/snapshot.go
@@ -15,7 +15,7 @@ import (
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
carv2 "github.com/ipld/go-car/v2"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multicodec"
cbg "github.com/whyrusleeping/cbor-gen"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
@@ -369,14 +369,16 @@ func (s *walkScheduler) Wait() error {
}
func (s *walkScheduler) enqueueIfNew(task walkTask) {
- if task.c.Prefix().MhType == mh.IDENTITY {
+ if multicodec.Code(task.c.Prefix().MhType) == multicodec.Identity {
//log.Infow("ignored", "cid", todo.c.String())
return
}
- // This lets through RAW and CBOR blocks, the only two types that we
- // end up writing to the exported CAR.
- if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
+ // This lets through RAW, CBOR, and DagCBOR blocks, the only types that we end up writing to
+ // the exported CAR.
+ switch multicodec.Code(task.c.Prefix().Codec) {
+ case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
+ default:
//log.Infow("ignored", "cid", todo.c.String())
return
}
@@ -450,7 +452,8 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
// We exported the ipld block. If it wasn't a CBOR block, there's nothing
// else to do and we can bail out early as it won't have any links
// etc.
- if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
+ if multicodec.Code(t.c.Prefix().Codec) != multicodec.DagCbor ||
+ multicodec.Code(t.c.Prefix().MhType) == multicodec.Identity {
return nil
}
@@ -683,14 +686,13 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
prefix := c.Prefix()
// Don't include identity CIDs.
- if prefix.MhType == mh.IDENTITY {
+ if multicodec.Code(prefix.MhType) == multicodec.Identity {
continue
}
- // We only include raw and dagcbor, for now.
- // Raw for "code" CIDs.
- switch prefix.Codec {
- case cid.Raw, cid.DagCBOR:
+ // We only include raw, cbor, and dagcbor, for now.
+ switch multicodec.Code(prefix.Codec) {
+ case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
default:
continue
}
@@ -722,7 +724,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
}
func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
- if root.Prefix().Codec != cid.DagCBOR {
+ if multicodec.Code(root.Prefix().Codec) != multicodec.DagCbor {
return in, nil
}
diff --git a/chain/store/store.go b/chain/store/store.go
index 342939daf..f2826fc2f 100644
--- a/chain/store/store.go
+++ b/chain/store/store.go
@@ -367,49 +367,32 @@ func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid)
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
- return err
+ return xerrors.Errorf("failed to construct genesis tipset: %w", err)
}
- if err := cs.PutTipSet(ctx, ts); err != nil {
- return err
+ if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
+ return xerrors.Errorf("failed to persist genesis tipset: %w", err)
+ }
+
+ if err := cs.AddToTipSetTracker(ctx, b); err != nil {
+ return xerrors.Errorf("failed to add genesis tipset to tracker: %w", err)
+ }
+
+ if err := cs.RefreshHeaviestTipSet(ctx, ts.Height()); err != nil {
+ return xerrors.Errorf("failed to put genesis tipset: %w", err)
}
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
}
-func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
- if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
- return xerrors.Errorf("failed to persist tipset: %w", err)
- }
-
- expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
- if err != nil {
- return xerrors.Errorf("errored while expanding tipset: %w", err)
- }
-
- if expanded.Key() != ts.Key() {
- log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
-
- tsBlk, err := expanded.Key().ToStorageBlock()
- if err != nil {
- return xerrors.Errorf("failed to get tipset key block: %w", err)
- }
-
- if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
- return xerrors.Errorf("failed to put tipset key block: %w", err)
- }
- }
-
- if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
- return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
- }
- return nil
-}
-
-// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
-// internal state as our new head, if and only if it is heavier than the current
-// head and does not exceed the maximum fork length.
-func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
+// RefreshHeaviestTipSet receives a newTsHeight at which a new tipset might exist. It then:
+// - "refreshes" the heaviest tipset that can be formed at its current heaviest height
+// - if equivocation is detected among the miners of the current heaviest tipset, the head is immediately updated to the heaviest tipset that can be formed in a range of 5 epochs
+//
+// - forms the best tipset that can be formed at the _input_ height
+// - compares the three tipset weights: "current" heaviest tipset, "refreshed" tipset, and best tipset at newTsHeight
+// - updates "current" heaviest to the heaviest of those 3 tipsets (if an update is needed), assuming it doesn't violate the maximum fork rule
+func (cs *ChainStore) RefreshHeaviestTipSet(ctx context.Context, newTsHeight abi.ChainEpoch) error {
for {
cs.heaviestLk.Lock()
if len(cs.reorgCh) < reorgChBuf/2 {
@@ -426,39 +409,90 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
defer cs.heaviestLk.Unlock()
- if ts.Equals(cs.heaviest) {
+ heaviestWeight, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
+ if err != nil {
+ return xerrors.Errorf("failed to calculate currentHeaviest's weight: %w", err)
+ }
+
+ heaviestHeight := abi.ChainEpoch(0)
+ if cs.heaviest != nil {
+ heaviestHeight = cs.heaviest.Height()
+ }
+
+ // Before we look at newTs, let's refresh best tipset at current head's height -- this is done to detect equivocation
+ newHeaviest, newHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, heaviestHeight)
+ if err != nil {
+ return xerrors.Errorf("failed to reform head at same height: %w", err)
+ }
+
+ // Equivocation has occurred! We need a new head NOW!
+ if newHeaviest == nil || newHeaviestWeight.LessThan(heaviestWeight) {
+ log.Warnf("chainstore heaviest tipset's weight SHRANK from %d (%s) to %d (%s) due to equivocation", heaviestWeight, cs.heaviest, newHeaviestWeight, newHeaviest)
+ // Unfortunately, we don't know what the right height to form a new heaviest tipset is.
+ // It is _probably_, but not _necessarily_, heaviestHeight.
+ // So, we need to explore a range of epochs, finding the heaviest tipset in that range.
+ // We thus try to form the heaviest tipset for 5 epochs above heaviestHeight (most of which will likely not exist),
+ // as well as for 5 below.
+ // This is slow, but we expect to almost-never be here (only if miners are equivocating, which carries a hefty penalty).
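+ // Concretely, for heaviest height H the loop below scans heights H+5 down through H-4, ten candidates in total.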
+ for i := heaviestHeight + 5; i > heaviestHeight-5; i-- {
+ possibleHeaviestTs, possibleHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, i)
+ if err != nil {
+ return xerrors.Errorf("failed to produce head at height %d: %w", i, err)
+ }
+
+ if possibleHeaviestWeight.GreaterThan(newHeaviestWeight) {
+ newHeaviestWeight = possibleHeaviestWeight
+ newHeaviest = possibleHeaviestTs
+ }
+ }
+
+ // if we've found something, we know it's the heaviest equivocation-free head, take it IMMEDIATELY
+ if newHeaviest != nil {
+ errTake := cs.takeHeaviestTipSet(ctx, newHeaviest)
+ if errTake != nil {
+ return xerrors.Errorf("failed to take newHeaviest tipset as head: %w", err)
+ }
+ } else {
+ // if we haven't found something, just stay with our equivocation-y head
+ newHeaviest = cs.heaviest
+ }
+ }
+
+ // if the new height we were notified about isn't what we just refreshed at, see if we have a heavier tipset there
+ if newTsHeight != newHeaviest.Height() {
+ bestTs, bestTsWeight, err := cs.FormHeaviestTipSetForHeight(ctx, newTsHeight)
+ if err != nil {
+ return xerrors.Errorf("failed to form new heaviest tipset at height %d: %w", newTsHeight, err)
+ }
+
+ heavier := bestTsWeight.GreaterThan(newHeaviestWeight)
+ if bestTsWeight.Equals(newHeaviestWeight) {
+ heavier = breakWeightTie(bestTs, newHeaviest)
+ }
+
+ if heavier {
+ newHeaviest = bestTs
+ }
+ }
+
+ // Everything's the same as before, exit early
+ if newHeaviest.Equals(cs.heaviest) {
return nil
}
- w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
+ // At this point, it MUST be true that newHeaviest is heavier than cs.heaviest -- update if fork allows
+ exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, newHeaviest)
if err != nil {
- return err
+ return xerrors.Errorf("failed to check fork length: %w", err)
}
- heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
+
+ if exceeds {
+ return nil
+ }
+
+ err = cs.takeHeaviestTipSet(ctx, newHeaviest)
if err != nil {
- return err
- }
-
- heavier := w.GreaterThan(heaviestW)
- if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
- log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
- heavier = breakWeightTie(ts, cs.heaviest)
- }
-
- if heavier {
- // TODO: don't do this for initial sync. Now that we don't have a
- // difference between 'bootstrap sync' and 'caught up' sync, we need
- // some other heuristic.
-
- exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
- if err != nil {
- return err
- }
- if exceeds {
- return nil
- }
-
- return cs.takeHeaviestTipSet(ctx, ts)
+ return xerrors.Errorf("failed to take heaviest tipset: %w", err)
}
return nil
@@ -655,6 +689,16 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
return err
}
+ // write the tipsetkey block to the blockstore for EthAPI queries
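+ // (Eth RPC resolves blocks by the hash of their TipSetKey bytes, so the key block must be loadable from the local blockstore.)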
+ tsBlk, err := ts.Key().ToStorageBlock()
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset key block: %w", err)
+ }
+
+ if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
+ return xerrors.Errorf("failed to put tipset key block: %w", err)
+ }
+
if prevHeaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
@@ -904,6 +948,14 @@ func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetK
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
+ // this can take a long time and a lot of memory if the tipsets are far apart
+ // since it can be reached through remote calls, we need to
+ // cancel early when possible to prevent resource exhaustion.
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err()
+ default:
+ }
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(ctx, left.Parents())
@@ -960,7 +1012,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
// This means that we ideally want to keep only most recent 900 epochs in here
// Golang's map iteration starts at a random point in a map.
// With 5 tries per epoch, and 900 entries to keep, on average we will have
- // ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
+ // ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
// Seems good enough to me
for height := range cs.tipsets {
@@ -975,6 +1027,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
return nil
}
+// PersistTipsets writes the provided blocks and the TipSetKey objects to the blockstore
func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
tsBlks := make([]block.Block, 0, len(tipsets))
@@ -1027,44 +1080,72 @@ func (cs *ChainStore) persistBlockHeaders(ctx context.Context, b ...*types.Block
return err
}
-func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
- // Hold lock for the whole function for now, if it becomes a problem we can
- // fix pretty easily
+// FormHeaviestTipSetForHeight looks up all valid blocks at a given height, and returns the heaviest tipset that can be made at that height
+// It does not consider ANY blocks from miners that have "equivocated" (produced 2 blocks at the same height)
+func (cs *ChainStore) FormHeaviestTipSetForHeight(ctx context.Context, height abi.ChainEpoch) (*types.TipSet, types.BigInt, error) {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
- all := []*types.BlockHeader{b}
-
- tsets, ok := cs.tipsets[b.Height]
+ blockCids, ok := cs.tipsets[height]
if !ok {
- return types.NewTipSet(all)
+ return nil, types.NewInt(0), nil
}
- inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
- for _, bhc := range tsets {
- if bhc == b.Cid() {
- continue
- }
+ // First, identify "bad" miners for the height
+ seenMiners := map[address.Address]struct{}{}
+ badMiners := map[address.Address]struct{}{}
+ blocks := make([]*types.BlockHeader, 0, len(blockCids))
+ for _, bhc := range blockCids {
h, err := cs.GetBlock(ctx, bhc)
if err != nil {
- return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
+ return nil, types.NewInt(0), xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
- if cid, found := inclMiners[h.Miner]; found {
- log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
+ if _, seen := seenMiners[h.Miner]; seen {
+ badMiners[h.Miner] = struct{}{}
continue
}
+ seenMiners[h.Miner] = struct{}{}
+ blocks = append(blocks, h)
+ }
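+
+ // Note: the first block from a miner later marked bad is still in blocks;
+ // it gets filtered out below via the badMiners set.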
- if types.CidArrsEqual(h.Parents, b.Parents) {
- all = append(all, h)
- inclMiners[h.Miner] = bhc
+ // Next, group by parent tipset
+
+ formableTipsets := make(map[types.TipSetKey][]*types.BlockHeader)
+ for _, h := range blocks {
+ if _, bad := badMiners[h.Miner]; bad {
+ continue
+ }
+ ptsk := types.NewTipSetKey(h.Parents...)
+ formableTipsets[ptsk] = append(formableTipsets[ptsk], h)
+ }
+
+ maxWeight := types.NewInt(0)
+ var maxTs *types.TipSet
+ for _, headers := range formableTipsets {
+ ts, err := types.NewTipSet(headers)
+ if err != nil {
+ return nil, types.NewInt(0), xerrors.Errorf("unexpected error forming tipset: %w", err)
+ }
+
+ weight, err := cs.Weight(ctx, ts)
+ if err != nil {
+ return nil, types.NewInt(0), xerrors.Errorf("failed to calculate weight: %w", err)
+ }
+
+ heavier := weight.GreaterThan(maxWeight)
+ if weight.Equals(maxWeight) {
+ heavier = breakWeightTie(ts, maxTs)
+ }
+
+ if heavier {
+ maxWeight = weight
+ maxTs = ts
}
}
- // TODO: other validation...?
-
- return types.NewTipSet(all)
+ return maxTs, maxWeight, nil
}
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
diff --git a/chain/store/store_test.go b/chain/store/store_test.go
index cea0fdc2a..9c717fdbe 100644
--- a/chain/store/store_test.go
+++ b/chain/store/store_test.go
@@ -10,6 +10,7 @@ import (
"github.com/ipfs/go-datastore"
"github.com/stretchr/testify/require"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
@@ -238,3 +239,171 @@ func TestChainExportImportFull(t *testing.T) {
}
}
}
+
+func TestEquivocations(t *testing.T) {
+ ctx := context.Background()
+ cg, err := gen.NewGenerator()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var last *types.TipSet
+ for i := 0; i < 10; i++ {
+ ts, err := cg.NextTipSet()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ last = ts.TipSet.TipSet()
+ }
+
+ mTs, err := cg.NextTipSetFromMiners(last, []address.Address{last.Blocks()[0].Miner}, 0)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(mTs.TipSet.TipSet().Cids()))
+ last = mTs.TipSet.TipSet()
+
+ require.NotEmpty(t, last.Blocks())
+ blk1 := *last.Blocks()[0]
+
+ // quick check: asking to form tipset at latest height just returns head
+ bestHead, bestHeadWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ require.Equal(t, last.Key(), bestHead.Key())
+ require.Contains(t, last.Cids(), blk1.Cid())
+ expectedWeight, err := cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by a different miner -- it should get included in the best tipset
+ blk2 := blk1
+ blk1Miner, err := address.IDFromAddress(blk2.Miner)
+ require.NoError(t, err)
+ blk2.Miner, err = address.NewIDAddress(blk1Miner + 50)
+ require.NoError(t, err)
+ addBlockToTracker(t, cg.ChainStore(), &blk2)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ require.Contains(t, bestHead.Cids(), blk2.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by a different miner, but on a different tipset -- it should NOT get included
+ blk3 := blk1
+ blk3.Miner, err = address.NewIDAddress(blk1Miner + 100)
+ require.NoError(t, err)
+ blk1Parent, err := cg.ChainStore().GetBlock(ctx, blk3.Parents[0])
+ require.NoError(t, err)
+ blk3.Parents = blk1Parent.Parents
+ addBlockToTracker(t, cg.ChainStore(), &blk3)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ require.Contains(t, bestHead.Cids(), blk2.Cid())
+ require.NotContains(t, bestHead.Cids(), blk3.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by the same miner as blk1 -- it should NOT get included, and blk1 should be excluded too
+ blk4 := blk1
+ blk4.Timestamp = blk1.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk4)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ if blkCid != blk1.Cid() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ }
+ require.NotContains(t, bestHead.Cids(), blk4.Cid())
+ require.NotContains(t, bestHead.Cids(), blk1.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // check that after all of that, the chainstore's head has NOT changed
+ require.Equal(t, last.Key(), cg.ChainStore().GetHeaviestTipSet().Key())
+
+ // NOW, after all that, notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+
+ originalHead := *last
+ newHead := cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should be at the same height as the originalHead
+ require.Equal(t, originalHead.Height(), newHead.Height())
+ // the newHead should NOT be the same as the originalHead
+ require.NotEqual(t, originalHead.Key(), newHead.Key())
+ // specifically, it should not contain any blocks by blk1Miner
+ for _, b := range newHead.Blocks() {
+ require.NotEqual(t, blk1.Miner, b.Miner)
+ }
+
+ // now have blk2's Miner equivocate too! this causes us to switch to a tipset with a different parent!
+ blk5 := blk2
+ blk5.Timestamp = blk5.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk5)
+
+ // notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+ newHead = cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should still be at the same height as the originalHead
+ require.Equal(t, originalHead.Height(), newHead.Height())
+ // BUT it should no longer have the same parents -- only blk3's miner is good, and they mined on a different tipset
+ require.Equal(t, 1, len(newHead.Blocks()))
+ require.Equal(t, blk3.Cid(), newHead.Cids()[0])
+ require.NotEqual(t, originalHead.Parents(), newHead.Parents())
+
+ // now have blk3's Miner equivocate too! this causes us to switch to a previous epoch entirely :(
+ blk6 := blk3
+ blk6.Timestamp = blk6.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk6)
+
+ // trying to form a tipset at our previous height leads to emptiness
+ tryTs, tryTsWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blk1.Height)
+ require.NoError(t, err)
+ require.Nil(t, tryTs)
+ require.True(t, tryTsWeight.IsZero())
+
+ // notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+ newHead = cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should now be one epoch behind originalHead
+ require.Greater(t, originalHead.Height(), newHead.Height())
+
+ // next, we create a new tipset with only one block after many null rounds
+ headAfterNulls, err := cg.NextTipSetFromMiners(newHead, []address.Address{newHead.Blocks()[0].Miner}, 15)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(headAfterNulls.TipSet.Blocks))
+
+ // now, we disqualify the block in this tipset because of equivocation
+ blkAfterNulls := headAfterNulls.TipSet.TipSet().Blocks()[0]
+ equivocatedBlkAfterNulls := *blkAfterNulls
+ equivocatedBlkAfterNulls.Timestamp = blkAfterNulls.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &equivocatedBlkAfterNulls)
+
+ // try to form a tipset at this height -- it should be empty
+ tryTs2, tryTsWeight2, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blkAfterNulls.Height)
+ require.NoError(t, err)
+ require.Nil(t, tryTs2)
+ require.True(t, tryTsWeight2.IsZero())
+
+ // now we "notify" at this height -- it should lead to no head change because there's no formable head in near epochs
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blkAfterNulls.Height))
+ require.True(t, headAfterNulls.TipSet.TipSet().Equals(cg.ChainStore().GetHeaviestTipSet()))
+}
+
+func addBlockToTracker(t *testing.T, cs *store.ChainStore, blk *types.BlockHeader) {
+ blkTs, err := types.NewTipSet([]*types.BlockHeader{blk})
+ require.NoError(t, err)
+ require.NoError(t, cs.PersistTipsets(context.TODO(), []*types.TipSet{blkTs}))
+ require.NoError(t, cs.AddToTipSetTracker(context.TODO(), blk))
+}
diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go
index a7c0bee57..3a11f7c98 100644
--- a/chain/sub/incoming.go
+++ b/chain/sub/incoming.go
@@ -350,6 +350,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
)
recordFailure(ctx, metrics.MessageValidationFailure, "add")
switch {
+
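+ // Transient or soft failures below are ignored without penalizing the sending peer.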
case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
fallthrough
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
@@ -362,8 +363,17 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
fallthrough
case xerrors.Is(err, messagepool.ErrNonceTooLow):
fallthrough
+ case xerrors.Is(err, messagepool.ErrNotEnoughFunds):
+ fallthrough
case xerrors.Is(err, messagepool.ErrExistingNonce):
return pubsub.ValidationIgnore
+
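+ // Clear protocol violations below fall through to rejection, which also penalizes the sender in gossipsub scoring.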
+ case xerrors.Is(err, messagepool.ErrMessageTooBig):
+ fallthrough
+ case xerrors.Is(err, messagepool.ErrMessageValueTooHigh):
+ fallthrough
+ case xerrors.Is(err, messagepool.ErrInvalidToAddr):
+ fallthrough
default:
return pubsub.ValidationReject
}
@@ -519,9 +529,8 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
msgCid := idxrMsg.Cid
- var msgInfo *peerMsgInfo
- msgInfo, ok := v.peerCache.Get(minerAddr)
- if !ok {
+ msgInfo, cached := v.peerCache.Get(minerAddr)
+ if !cached {
msgInfo = &peerMsgInfo{}
}
@@ -529,17 +538,17 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
msgInfo.mutex.Lock()
defer msgInfo.mutex.Unlock()
- if ok {
+ var seqno uint64
+ if cached {
// Reject replayed messages.
- seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
+ seqno = binary.BigEndian.Uint64(msg.Message.GetSeqno())
if seqno <= msgInfo.lastSeqno {
log.Debugf("ignoring replayed indexer message")
return pubsub.ValidationIgnore
}
- msgInfo.lastSeqno = seqno
}
- if !ok || originPeer != msgInfo.peerID {
+ if !cached || originPeer != msgInfo.peerID {
// Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil {
@@ -548,7 +557,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationReject
}
msgInfo.peerID = originPeer
- if !ok {
+ if !cached {
// Add msgInfo to cache only after being authenticated. If two
// messages from the same peer are handled concurrently, there is a
// small chance that one msgInfo could replace the other here when
@@ -557,6 +566,9 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
}
}
+ // Update message info cache with the latest message's sequence number.
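+ // Updating it only after authentication succeeds prevents a spoofed message
+ // with a high seqno from advancing lastSeqno and starving genuine messages
+ // (the DoS scenario exercised by TestIdxValidator).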
+ msgInfo.lastSeqno = seqno
+
// See if message needs to be ignored due to rate limiting.
if v.rateLimitPeer(msgInfo, msgCid) {
return pubsub.ValidationIgnore
diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go
index f54e09049..d8ee99b7f 100644
--- a/chain/sub/incoming_test.go
+++ b/chain/sub/incoming_test.go
@@ -12,10 +12,12 @@ import (
"github.com/ipni/go-libipni/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
+ "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -134,3 +136,123 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
})
}
}
+
+func TestIdxValidator(t *testing.T) {
+ validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addr, err := address.NewFromString("f01024")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf1, err := addr.MarshalBinary()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ selfPID := "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW"
+ senderPID := "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ"
+ extraData := buf1
+
+ mc := gomock.NewController(t)
+ node := mocks.NewMockFullNode(mc)
+ node.EXPECT().ChainHead(gomock.Any()).Return(nil, nil).AnyTimes()
+
+ subject := NewIndexerMessageValidator(peer.ID(selfPID), node, node)
+ message := message.Message{
+ Cid: validCid,
+ Addrs: nil,
+ ExtraData: extraData,
+ }
+ buf := bytes.NewBuffer(nil)
+ if err := message.MarshalCBOR(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ topic := "topic"
+
+ privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id, err := peer.IDFromPublicKey(privk.GetPublic())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ node.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{PeerId: &id}, nil).AnyTimes()
+
+ pbm := &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id),
+ Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 2},
+ }
+ validate := subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationAccept {
+ t.Error("Expected to receive ValidationAccept")
+ }
+ msgInfo, cached := subject.peerCache.Get(addr)
+ if !cached {
+ t.Fatal("Message info should be in cache")
+ }
+ seqno := msgInfo.lastSeqno
+ msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+ t.Log("Sending DoS msg")
+ privk, _, err = crypto.GenerateKeyPair(crypto.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id2, err := peer.IDFromPublicKey(privk.GetPublic())
+ if err != nil {
+ t.Fatal(err)
+ }
+ pbm = &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id2),
+ Seqno: []byte{255, 255, 255, 255, 255, 255, 255, 255},
+ }
+ validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationReject {
+ t.Error("Expected to get ValidationReject")
+ }
+ msgInfo, cached = subject.peerCache.Get(addr)
+ if !cached {
+ t.Fatal("Message info should be in cache")
+ }
+ msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+ // Check if DoS is possible.
+ if msgInfo.lastSeqno != seqno {
+ t.Fatal("Sequence number should not have been updated")
+ }
+
+ t.Log("Sending another valid message from miner...")
+ pbm = &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id),
+ Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 3},
+ }
+ validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationAccept {
+ t.Fatal("Did not receive ValidationAccept")
+ }
+}
diff --git a/chain/sync.go b/chain/sync.go
index d8892a84e..044f317d9 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -536,7 +536,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
// At this point we have accepted and synced to the new `maybeHead`
// (`StageSyncComplete`).
- if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
+ if err := syncer.store.RefreshHeaviestTipSet(ctx, maybeHead.Height()); err != nil {
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
diff --git a/chain/sync_manager.go b/chain/sync_manager.go
index 94017c276..3369c3b5a 100644
--- a/chain/sync_manager.go
+++ b/chain/sync_manager.go
@@ -92,6 +92,7 @@ type syncManager struct {
var _ SyncManager = (*syncManager)(nil)
type peerHead struct {
+ // Note: this doesn't _necessarily_ mean that p's head is ts, just that ts is a tipset that p sent to us
p peer.ID
ts *types.TipSet
}
diff --git a/chain/sync_test.go b/chain/sync_test.go
index a86d42f17..ec960d7d0 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -311,7 +311,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
for _, lastB := range lastTs.Blocks {
require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header))
}
- err = cs.PutTipSet(tu.ctx, lastTs.TipSet())
+ err = cs.RefreshHeaviestTipSet(tu.ctx, lastTs.TipSet().Height())
require.NoError(tu.t, err)
tu.genesis = genesis
diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go
index 3e0dd8724..b796e6f56 100644
--- a/chain/types/ethtypes/eth_types.go
+++ b/chain/types/ethtypes/eth_types.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
@@ -929,3 +930,57 @@ func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error {
return errors.New("invalid block param")
}
+
+type EthTrace struct {
+ Action EthTraceAction `json:"action"`
+ Result EthTraceResult `json:"result"`
+ Subtraces int `json:"subtraces"`
+ TraceAddress []int `json:"traceAddress"`
+ Type string `json:"Type"`
+
+ Parent *EthTrace `json:"-"`
+
+ // if a subtrace makes a call to GetBytecode, we store a pointer to that subtrace here
+ // which we then lookup when checking for delegatecall (InvokeContractDelegate)
+ LastByteCode *EthTrace `json:"-"`
+}
+
+func (t *EthTrace) SetCallType(callType string) {
+ t.Action.CallType = callType
+ t.Type = callType
+}
+
+type EthTraceBlock struct {
+ *EthTrace
+ BlockHash EthHash `json:"blockHash"`
+ BlockNumber int64 `json:"blockNumber"`
+ TransactionHash EthHash `json:"transactionHash"`
+ TransactionPosition int `json:"transactionPosition"`
+}
+
+type EthTraceReplayBlockTransaction struct {
+ Output EthBytes `json:"output"`
+ StateDiff *string `json:"stateDiff"`
+ Trace []*EthTrace `json:"trace"`
+ TransactionHash EthHash `json:"transactionHash"`
+ VmTrace *string `json:"vmTrace"`
+}
+
+type EthTraceAction struct {
+ CallType string `json:"callType"`
+ From EthAddress `json:"from"`
+ To EthAddress `json:"to"`
+ Gas EthUint64 `json:"gas"`
+ Input EthBytes `json:"input"`
+ Value EthBigInt `json:"value"`
+
+ FilecoinMethod abi.MethodNum `json:"-"`
+ FilecoinCodeCid cid.Cid `json:"-"`
+ FilecoinFrom address.Address `json:"-"`
+ FilecoinTo address.Address `json:"-"`
+}
+
+type EthTraceResult struct {
+ GasUsed EthUint64 `json:"gasUsed"`
+ Output EthBytes `json:"output"`
+}
diff --git a/chain/types/ethtypes/rlp.go b/chain/types/ethtypes/rlp.go
index 049ea6fc4..15cee4a22 100644
--- a/chain/types/ethtypes/rlp.go
+++ b/chain/types/ethtypes/rlp.go
@@ -134,7 +134,7 @@ func decodeRLP(data []byte) (res interface{}, consumed int, err error) {
return nil, 0, err
}
totalLen := 1 + strLenInBytes + strLen
- if totalLen > len(data) {
+ if totalLen > len(data) || totalLen < 0 {
return nil, 0, xerrors.Errorf("invalid rlp data: out of bound while parsing string")
}
return data[1+strLenInBytes : totalLen], totalLen, nil
@@ -157,7 +157,12 @@ func decodeLength(data []byte, lenInBytes int) (length int, err error) {
if err := binary.Read(r, binary.BigEndian, &decodedLength); err != nil {
return 0, xerrors.Errorf("invalid rlp data: cannot parse string length: %w", err)
}
- if lenInBytes+int(decodedLength) > len(data) {
+ if decodedLength < 0 {
+ return 0, xerrors.Errorf("invalid rlp data: negative string length")
+ }
+
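+ // The sum below can overflow int and wrap negative for attacker-chosen lengths, hence the check on both bounds.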
+ totalLength := lenInBytes + int(decodedLength)
+ if totalLength < 0 || totalLength > len(data) {
return 0, xerrors.Errorf("invalid rlp data: out of bound while parsing list")
}
return int(decodedLength), nil
diff --git a/chain/types/ethtypes/rlp_test.go b/chain/types/ethtypes/rlp_test.go
index bdbedff00..0ce6e15d9 100644
--- a/chain/types/ethtypes/rlp_test.go
+++ b/chain/types/ethtypes/rlp_test.go
@@ -143,6 +143,20 @@ func TestDecodeList(t *testing.T) {
}
}
+func TestDecodeNegativeLength(t *testing.T) {
+ testcases := [][]byte{
+ mustDecodeHex("0xbfffffffffffffff0041424344"),
+ mustDecodeHex("0xc1bFFF1111111111111111"),
+ mustDecodeHex("0xbFFF11111111111111"),
+ mustDecodeHex("0xbf7fffffffffffffff41424344"),
+ }
+
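+ // Each vector encodes an 8-byte RLP length that is negative or overflows int,
+ // which previously escaped the bounds checks.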
+ for _, tc := range testcases {
+ _, err := DecodeRLP(tc)
+ require.ErrorContains(t, err, "invalid rlp data")
+ }
+}
+
func TestDecodeEncodeTx(t *testing.T) {
testcases := [][]byte{
mustDecodeHex("0xdc82013a0185012a05f2008504a817c8008080872386f26fc1000000c0"),
diff --git a/chain/types/event.go b/chain/types/event.go
index 91b0e95d3..106a120e2 100644
--- a/chain/types/event.go
+++ b/chain/types/event.go
@@ -1,11 +1,6 @@
package types
import (
- "bytes"
- "fmt"
-
- cbg "github.com/whyrusleeping/cbor-gen"
-
"github.com/filecoin-project/go-state-types/abi"
)
@@ -38,24 +33,3 @@ type EventEntry struct {
}
type FilterID [32]byte // compatible with EthHash
-
-// DecodeEvents decodes a CBOR list of CBOR-encoded events.
-func DecodeEvents(input []byte) ([]Event, error) {
- r := bytes.NewReader(input)
- typ, len, err := cbg.NewCborReader(r).ReadHeader()
- if err != nil {
- return nil, fmt.Errorf("failed to read events: %w", err)
- }
- if typ != cbg.MajArray {
- return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
- }
- events := make([]Event, 0, len)
- for i := 0; i < int(len); i++ {
- var evt Event
- if err := evt.UnmarshalCBOR(r); err != nil {
- return nil, fmt.Errorf("failed to parse event: %w", err)
- }
- events = append(events, evt)
- }
- return events, nil
-}
diff --git a/chain/vm/fvm.go b/chain/vm/fvm.go
index 47b4d3320..c8c02dddd 100644
--- a/chain/vm/fvm.go
+++ b/chain/vm/fvm.go
@@ -468,7 +468,7 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet
}
if vm.returnEvents && len(ret.EventsBytes) > 0 {
- applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
+ applyRet.Events, err = decodeEvents(ret.EventsBytes)
if err != nil {
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
}
@@ -524,7 +524,7 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*
}
if vm.returnEvents && len(ret.EventsBytes) > 0 {
- applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
+ applyRet.Events, err = decodeEvents(ret.EventsBytes)
if err != nil {
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
}
diff --git a/chain/vm/fvm_util.go b/chain/vm/fvm_util.go
new file mode 100644
index 000000000..9f3dfd869
--- /dev/null
+++ b/chain/vm/fvm_util.go
@@ -0,0 +1,39 @@
+package vm
+
+import (
+ "bytes"
+ "fmt"
+
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
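+// MaxEventSliceLength caps the claimed CBOR array length before any allocation
+// happens; a corrupt or malicious payload could otherwise force a huge make().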
+const MaxEventSliceLength = 6_000_000
+
+// DecodeEvents decodes a CBOR list of CBOR-encoded events.
+func decodeEvents(input []byte) ([]types.Event, error) {
+ r := bytes.NewReader(input)
+ typ, length, err := cbg.NewCborReader(r).ReadHeader()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read events: %w", err)
+ }
+
+ if length > MaxEventSliceLength {
+ log.Errorf("extremely long event slice (len %d) returned, not decoding", length)
+ return nil, nil
+ }
+
+ if typ != cbg.MajArray {
+ return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
+ }
+ events := make([]types.Event, 0, length)
+ for i := 0; i < int(length); i++ {
+ var evt types.Event
+ if err := evt.UnmarshalCBOR(r); err != nil {
+ return nil, fmt.Errorf("failed to parse event: %w", err)
+ }
+ events = append(events, evt)
+ }
+ return events, nil
+}
diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go
index 68dbbb2df..83a07ca2d 100644
--- a/chain/vm/syscalls.go
+++ b/chain/vm/syscalls.go
@@ -70,11 +70,6 @@ type syscallShim struct {
}
func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
- var sum abi.PaddedPieceSize
- for _, p := range pieces {
- sum += p.Size
- }
-
commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces)
if err != nil {
log.Errorf("generate data commitment failed: %s", err)
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index 8f4c89e92..ba404ab1f 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -11,7 +11,7 @@ import (
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multicodec"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
@@ -38,7 +38,6 @@ import (
)
const MaxCallDepth = 4096
-const CborCodec = 0x51
var (
log = logging.Logger("vm")
@@ -128,7 +127,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error {
func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
paramsCodec := uint64(0)
if len(msg.Params) > 0 {
- paramsCodec = CborCodec
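+ // multicodec.Cbor is 0x51, the value of the removed CborCodec constant, so the encoding is unchanged.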
+ paramsCodec = uint64(multicodec.Cbor)
}
rt := &Runtime{
ctx: ctx,
@@ -380,7 +379,7 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
retCodec := uint64(0)
if len(ret) > 0 {
- retCodec = CborCodec
+ retCodec = uint64(multicodec.Cbor)
}
rt.executionTrace.MsgRct = types.ReturnTrace{
ExitCode: aerrors.RetCode(err),
@@ -695,15 +694,15 @@ func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store {
}
func linksForObj(blk block.Block, cb func(cid.Cid)) error {
- switch blk.Cid().Prefix().Codec {
- case cid.DagCBOR:
+ switch multicodec.Code(blk.Cid().Prefix().Codec) {
+ case multicodec.DagCbor:
err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb)
if err != nil {
return xerrors.Errorf("cbg.ScanForLinks: %w", err)
}
return nil
- case cid.Raw:
- // We implicitly have all children of raw blocks.
+ case multicodec.Raw, multicodec.Cbor:
+ // We implicitly have all children of raw/cbor blocks.
return nil
default:
return xerrors.Errorf("vm flush copy method only supports dag cbor")
@@ -803,14 +802,17 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid,
}
prefix := link.Prefix()
- if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed {
+ codec := multicodec.Code(prefix.Codec)
+ switch codec {
+ case multicodec.FilCommitmentSealed, multicodec.FilCommitmentUnsealed:
return
}
// We always have blocks inlined into CIDs, but we may not have their children.
- if prefix.MhType == mh.IDENTITY {
+ if multicodec.Code(prefix.MhType) == multicodec.Identity {
// Unless the inlined block has no children.
- if prefix.Codec == cid.Raw {
+ switch codec {
+ case multicodec.Raw, multicodec.Cbor:
return
}
} else {
diff --git a/cli/sync.go b/cli/sync.go
index 02e4e381f..89d2d94f0 100644
--- a/cli/sync.go
+++ b/cli/sync.go
@@ -273,11 +273,6 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
continue
}
- head, err := napi.ChainHead(ctx)
- if err != nil {
- return err
- }
-
working := -1
for i, ss := range state.ActiveSyncs {
switch ss.Stage {
@@ -332,7 +327,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
_ = target // todo: maybe print? (creates a bunch of line wrapping issues with most tipsets)
- if !watch && time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
+ isDone, err := IsSyncDone(ctx, napi)
+ if err != nil {
+ return err
+ }
+ if !watch && isDone {
fmt.Println("\nDone!")
return nil
}
@@ -347,3 +346,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
i++
}
}
+
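+// IsSyncDone reports whether the node appears synced: the head tipset's minimum
+// timestamp is within one block delay of the current wall-clock time.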
+func IsSyncDone(ctx context.Context, napi v0api.FullNode) (bool, error) {
+ head, err := napi.ChainHead(ctx)
+ if err != nil {
+ return false, err
+ }
+ return time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs), nil
+}
diff --git a/cli/wallet.go b/cli/wallet.go
index 2afe8617b..628d6841d 100644
--- a/cli/wallet.go
+++ b/cli/wallet.go
@@ -7,7 +7,9 @@ import (
"encoding/json"
"fmt"
"os"
+ "os/signal"
"strings"
+ "syscall"
"github.com/urfave/cli/v2"
"golang.org/x/term"
@@ -206,7 +208,12 @@ var walletBalance = &cli.Command{
return err
}
- if balance.Equals(types.NewInt(0)) {
+ inSync, err := IsSyncDone(ctx, api)
+ if err != nil {
+ return err
+ }
+
+ if balance.Equals(types.NewInt(0)) && !inSync {
afmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
} else {
afmt.Printf("%s\n", types.FIL(balance))
@@ -330,6 +337,17 @@ var walletImport = &cli.Command{
if !cctx.Args().Present() || cctx.Args().First() == "-" {
if term.IsTerminal(int(os.Stdin.Fd())) {
fmt.Print("Enter private key(not display in the terminal): ")
+
+ sigCh := make(chan os.Signal, 1)
+ // Notify the channel when SIGINT is received
+ signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
+
+ go func() {
+ <-sigCh
+ fmt.Println("\nInterrupt signal received. Exiting...")
+ os.Exit(1)
+ }()
+
inpdata, err = term.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return err
diff --git a/cli/wallet_test.go b/cli/wallet_test.go
index dee26018b..eb2c544f0 100644
--- a/cli/wallet_test.go
+++ b/cli/wallet_test.go
@@ -21,6 +21,7 @@ import (
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/mock"
)
func TestWalletNew(t *testing.T) {
@@ -133,6 +134,11 @@ func TestWalletBalance(t *testing.T) {
balance := big.NewInt(1234)
+ // add blocks to the chain
+ first := mock.TipSet(mock.MkBlock(nil, 5, 4))
+ head := mock.TipSet(mock.MkBlock(first, 15, 7))
+
+ mockApi.EXPECT().ChainHead(ctx).Return(head, nil)
mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)
//stm: @CLI_WALLET_BALANCE_001
diff --git a/cmd/lotus-bench/amt_internal.go b/cmd/lotus-bench/amt_internal.go
new file mode 100644
index 000000000..f0e3035b7
--- /dev/null
+++ b/cmd/lotus-bench/amt_internal.go
@@ -0,0 +1,312 @@
+// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal
+// which for some reason is a Go internal package and therefore cannot be imported
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ xerrors "golang.org/x/xerrors"
+)
+
+type AMTRoot struct {
+ BitWidth uint64
+ Height uint64
+ Count uint64
+ AMTNode AMTNode
+}
+
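+// AMTNode mirrors go-amt-ipld's internal node layout: Bmap flags occupied slots,
+// interior nodes populate Links, and leaf nodes populate Values.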
+type AMTNode struct {
+ Bmap []byte
+ Links []cid.Cid
+ Values []*cbg.Deferred
+}
+
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = math.E
+var _ = sort.Sort
+
+var lengthBufAMTRoot = []byte{132}
+
+func (t *AMTRoot) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+
+ cw := cbg.NewCborWriter(w)
+
+ if _, err := cw.Write(lengthBufAMTRoot); err != nil {
+ return err
+ }
+
+ // t.BitWidth (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil {
+ return err
+ }
+
+ // t.Height (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil {
+ return err
+ }
+
+ // t.Count (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil {
+ return err
+ }
+
+ // t.AMTNode (internal.AMTNode) (struct)
+ if err := t.AMTNode.MarshalCBOR(cw); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) {
+ *t = AMTRoot{}
+
+ cr := cbg.NewCborReader(r)
+
+ maj, extra, err := cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 4 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.BitWidth (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.BitWidth = extra
+
+ }
+ // t.Height (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Height = extra
+
+ }
+ // t.Count (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Count = extra
+
+ }
+ // t.AMTNode (internal.AMTNode) (struct)
+
+ {
+
+ if err := t.AMTNode.UnmarshalCBOR(cr); err != nil {
+ return xerrors.Errorf("unmarshaling t.AMTNode: %w", err)
+ }
+
+ }
+ return nil
+}
+
+var lengthBufAMTNode = []byte{131}
+
+func (t *AMTNode) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+
+ cw := cbg.NewCborWriter(w)
+
+ if _, err := cw.Write(lengthBufAMTNode); err != nil {
+ return err
+ }
+
+ // t.Bmap ([]uint8) (slice)
+ if len(t.Bmap) > cbg.ByteArrayMaxLen {
+ return xerrors.Errorf("Byte array in field t.Bmap was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil {
+ return err
+ }
+
+ if _, err := cw.Write(t.Bmap[:]); err != nil {
+ return err
+ }
+
+ // t.Links ([]cid.Cid) (slice)
+ if len(t.Links) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Links was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil {
+ return err
+ }
+ for _, v := range t.Links {
+ if err := cbg.WriteCid(w, v); err != nil {
+ return xerrors.Errorf("failed writing cid field t.Links: %w", err)
+ }
+ }
+
+ // t.Values ([]*typegen.Deferred) (slice)
+ if len(t.Values) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Values was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil {
+ return err
+ }
+ for _, v := range t.Values {
+ if err := v.MarshalCBOR(cw); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) {
+ *t = AMTNode{}
+
+ cr := cbg.NewCborReader(r)
+
+ maj, extra, err := cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Bmap ([]uint8) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.ByteArrayMaxLen {
+ return fmt.Errorf("t.Bmap: byte array too large (%d)", extra)
+ }
+ if maj != cbg.MajByteString {
+ return fmt.Errorf("expected byte array")
+ }
+
+ if extra > 0 {
+ t.Bmap = make([]uint8, extra)
+ }
+
+ if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil {
+ return err
+ }
+ // t.Links ([]cid.Cid) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Links: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Links = make([]cid.Cid, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ c, err := cbg.ReadCid(cr)
+ if err != nil {
+ return xerrors.Errorf("reading cid field t.Links failed: %w", err)
+ }
+ t.Links[i] = c
+ }
+
+ // t.Values ([]*typegen.Deferred) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Values: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Values = make([]*cbg.Deferred, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ var v cbg.Deferred
+ if err := v.UnmarshalCBOR(cr); err != nil {
+ return err
+ }
+
+ t.Values[i] = &v
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go
index 303dd52c9..b70709ff2 100644
--- a/cmd/lotus-bench/main.go
+++ b/cmd/lotus-bench/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
@@ -8,9 +9,16 @@ import (
"math/rand"
"os"
"path/filepath"
+ "sync"
"time"
"github.com/docker/go-units"
+ "github.com/ipfs/boxo/blockservice"
+ "github.com/ipfs/boxo/ipld/merkledag"
+ "github.com/ipfs/go-cid"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ format "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/minio/blake2b-simd"
"github.com/mitchellh/go-homedir"
@@ -20,10 +28,14 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
prooftypes "github.com/filecoin-project/go-state-types/proof"
+ adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -104,6 +116,7 @@ func main() {
DisableSliceFlagSeparator: true,
Commands: []*cli.Command{
proveCmd,
+ amtBenchCmd,
sealBenchCmd,
simpleCmd,
importBenchCmd,
@@ -117,6 +130,211 @@ func main() {
}
}
+type amtStatCollector struct {
+ ds format.NodeGetter
+ walk func(format.Node) ([]*format.Link, error)
+
+ statsLk sync.Mutex
+ totalAMTLinks int
+ totalAMTValues int
+ totalAMTLinkNodes int
+ totalAMTValueNodes int
+ totalAMTLinkNodeSize int
+ totalAMTValueNodeSize int
+}
+
+func (asc *amtStatCollector) String() string {
+ asc.statsLk.Lock()
+ defer asc.statsLk.Unlock()
+
+ str := "\n------------\n"
+ str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks)
+ str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues)
+ str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize)
+ str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize)
+ str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize)
+ return str
+}
+
+func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error {
+ size, err := nd.Size()
+ if err != nil {
+ return err
+ }
+
+ var node AMTNode
+ if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+ // try to deserialize root
+ var root AMTRoot
+ if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+ return err
+ }
+ node = root.AMTNode
+ }
+
+ asc.statsLk.Lock()
+ defer asc.statsLk.Unlock()
+
+ link := len(node.Links) > 0
+ value := len(node.Values) > 0
+
+ if link {
+ asc.totalAMTLinks += len(node.Links)
+ asc.totalAMTLinkNodes++
+ asc.totalAMTLinkNodeSize += int(size)
+ } else if value {
+ asc.totalAMTValues += len(node.Values)
+ asc.totalAMTValueNodes++
+ asc.totalAMTValueNodeSize += int(size)
+ } else {
+ return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData())
+ }
+
+ return nil
+}
+
+func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+ nd, err := asc.ds.Get(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := asc.record(ctx, nd); err != nil {
+ return nil, err
+ }
+
+ return asc.walk(nd)
+}
+
+func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+}
+
+var amtBenchCmd = &cli.Command{
+ Name: "amt",
+ Usage: "Benchmark AMT churn",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "rounds",
+ Usage: "rounds of churn to measure",
+ Value: 1,
+ },
+ &cli.IntFlag{
+ Name: "interval",
+ Usage: "AMT idx interval for churning values",
+ Value: 2880,
+ },
+ &cli.IntFlag{
+ Name: "bitwidth",
+ Usage: "AMT bitwidth",
+ Value: 6,
+ },
+ },
+ Action: func(c *cli.Context) error {
+ bs := blockstore.NewMemory()
+ ctx := c.Context
+ store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
+
+ // Setup in memory blockstore
+ bitwidth := c.Int("bitwidth")
+ array, err := adt.MakeEmptyArray(store, bitwidth)
+ if err != nil {
+ return err
+ }
+
+ // Motivating empirical example: the market actor's deal states AMT.
+ // Create 40,000,000 states for a realistic workload.
+ fmt.Printf("Populating AMT\n")
+ for i := 0; i < 40000000; i++ {
+ if err := array.Set(uint64(i), &market.DealState{
+ SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+ LastUpdatedEpoch: abi.ChainEpoch(-1),
+ SlashEpoch: -1,
+ VerifiedClaim: verifreg.AllocationId(i),
+ }); err != nil {
+ return err
+ }
+ }
+
+ r, err := array.Root()
+ if err != nil {
+ return err
+ }
+
+ // Measure ratio of internal / leaf nodes / sizes
+ dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ asc := &amtStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ fmt.Printf("Measuring AMT\n")
+ seen := cid.NewSet()
+ if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", asc)
+
+ // Overwrite ids with idx % interval: one epoch of market cron
+ rounds := c.Int("rounds")
+ interval := c.Int("interval")
+
+ fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds)
+ array, err = adt.AsArray(store, r, bitwidth)
+ if err != nil {
+ return err
+ }
+ roots := make([]cid.Cid, rounds)
+ for j := 0; j < rounds; j++ {
+ if j%10 == 0 {
+ fmt.Printf("round: %d\n", j)
+ }
+ for i := j; i < 40000000; i += interval {
+ if i%interval == j {
+ if err := array.Set(uint64(i), &market.DealState{
+ SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+ LastUpdatedEpoch: abi.ChainEpoch(1),
+ SlashEpoch: -1,
+ VerifiedClaim: verifreg.AllocationId(i),
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ roots[j], err = array.Root()
+ if err != nil {
+ return err
+ }
+
+ }
+
+ // Measure churn
+ dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ asc = &amtStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ fmt.Printf("Measuring %d rounds of churn\n", rounds)
+
+ for _, r := range roots {
+ if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+ return err
+ }
+ }
+
+ fmt.Printf("%s\n", asc)
+ return nil
+ },
+}
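+
+// Example invocation using the flags defined above (hypothetical values):
+//
+//	lotus-bench amt --rounds 10 --interval 2880 --bitwidth 6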
+
var sealBenchCmd = &cli.Command{
Name: "sealing",
Usage: "Benchmark seal and winning post and window post",
diff --git a/cmd/lotus-bench/simple.go b/cmd/lotus-bench/simple.go
index d19c37bc9..8ae5713ad 100644
--- a/cmd/lotus-bench/simple.go
+++ b/cmd/lotus-bench/simple.go
@@ -120,6 +120,11 @@ p: pvC0JBrEyUqtIIUvB2UUx/2a24c3Cvnu6AZ0D3IMBYAu...
type benchSectorProvider map[storiface.SectorFileType]string
+func (b benchSectorProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ // there's no copying in this context
+ return b.AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
func (b benchSectorProvider) AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
out := storiface.SectorPaths{
ID: id.ID,
diff --git a/cmd/lotus-shed/block.go b/cmd/lotus-shed/block.go
new file mode 100644
index 000000000..814eef3fd
--- /dev/null
+++ b/cmd/lotus-shed/block.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var blockCmd = &cli.Command{
+ Name: "block",
+ Usage: "Output decoded block header in readeble form",
+ ArgsUsage: "[block header hex]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 1 {
+ return lcli.IncorrectNumArgs(cctx)
+ }
+
+ b, err := hex.DecodeString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ var blk types.BlockHeader
+ if err := blk.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
+ return err
+ }
+
+ jb, err := json.MarshalIndent(blk, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(jb))
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go
index 2b8dc8ebf..9741792ec 100644
--- a/cmd/lotus-shed/cron-count.go
+++ b/cmd/lotus-shed/cron-count.go
@@ -1,14 +1,24 @@
package main
import (
+ "encoding/json"
"fmt"
+ "os"
+ "github.com/ipfs/go-cid"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
+ "github.com/filecoin-project/go-state-types/builtin/v11/util/adt"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -17,6 +27,245 @@ var cronWcCmd = &cli.Command{
Description: "cron stats",
Subcommands: []*cli.Command{
minerDeadlineCronCountCmd,
+ minerDeadlinePartitionMeasurementCmd,
+ },
+}
+
+type DeadlineRef struct {
+ To string
+ Height abi.ChainEpoch
+ Gas json.RawMessage
+}
+
+type DeadlineSummary struct {
+ Partitions []PartitionSummary
+ PreCommitExpiry PreCommitExpiry
+ VestingDiff VestingDiff
+}
+
+type PreCommitExpiry struct {
+ Expired []uint64
+}
+
+type VestingDiff struct {
+ PrevTableSize int
+ NewTableSize int
+}
+
+type PartitionSummary struct {
+ Live int
+ Dead int
+ Faulty int
+ Diff PartitionDiff
+}
+
+type PartitionDiff struct {
+ Faulted int
+ Recovered int
+ Killed int
+}
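+
+// deadline-summary reads a JSON array of DeadlineRef values from stdin, e.g.
+// (hypothetical values):
+//
+//	[{"To":"f01234","Height":3100000,"Gas":123456}]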
+
+var minerDeadlinePartitionMeasurementCmd = &cli.Command{
+ Name: "deadline-summary",
+ Description: "",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "json",
+ Usage: "read input as json",
+ Value: true,
+ },
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset state to search on (pass comma separated array of cids)",
+ },
+ },
+ Action: func(c *cli.Context) error {
+ // read in values to process
+ if !c.Bool("json") {
+ return xerrors.Errorf("unsupported non json input format")
+ }
+ var refStream []DeadlineRef
+ if err := json.NewDecoder(os.Stdin).Decode(&refStream); err != nil {
+ return xerrors.Errorf("failed to parse input: %w", err)
+ }
+
+ // go from height and sp addr to deadline partition data
+ n, acloser, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+ ctx := lcli.ReqContext(c)
+
+ bs := ReadOnlyAPIBlockstore{n}
+ adtStore := adt.WrapStore(ctx, ipldcbor.NewCborStore(&bs))
+
+ dSummaries := make([]DeadlineSummary, len(refStream))
+ for j, ref := range refStream {
+ // get miner's deadline
+ tsBefore, err := n.ChainGetTipSetByHeight(ctx, ref.Height, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset at epoch: %d: %w", ref.Height, err)
+ }
+ tsAfter, err := n.ChainGetTipSetByHeight(ctx, ref.Height+1, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset at epoch %d: %w", ref.Height, err)
+ }
+ addr, err := address.NewFromString(ref.To)
+ if err != nil {
+ return xerrors.Errorf("faield to get address from input string: %w", err)
+ }
+ dline, err := n.StateMinerProvingDeadline(ctx, addr, tsBefore.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to read proving deadline: %w", err)
+ }
+
+ // iterate through all partitions at epoch of processing
+ var pSummaries []PartitionSummary
+ psBefore, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsBefore.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to get partitions: %w", err)
+ }
+ psAfter, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsAfter.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to get partitions: %w", err)
+ }
+ if len(psBefore) != len(psAfter) {
+ return xerrors.Errorf("faield")
+ }
+
+ type partitionCount struct {
+ live int
+ dead int
+ faulty int
+ recovering int
+ }
+ countPartition := func(p api.Partition) (partitionCount, error) {
+ liveSectors, err := p.LiveSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count live sectors in partition: %w", err)
+ }
+ allSectors, err := p.AllSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count all sectors in partition: %w", err)
+ }
+ faultySectors, err := p.FaultySectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count faulty sectors in partition: %w", err)
+ }
+ recoveringSectors, err := p.RecoveringSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count recovering sectors in partition: %w", err)
+ }
+
+ return partitionCount{
+ live: len(liveSectors),
+ dead: len(allSectors) - len(liveSectors),
+ faulty: len(faultySectors),
+ recovering: len(recoveringSectors),
+ }, nil
+ }
+
+ countVestingTable := func(table cid.Cid) (int, error) {
+ var vestingTable miner11.VestingFunds
+ if err := adtStore.Get(ctx, table, &vestingTable); err != nil {
+ return 0, err
+ }
+ return len(vestingTable.Funds), nil
+ }
+
+ for i := 0; i < len(psBefore); i++ {
+ cntBefore, err := countPartition(psBefore[i])
+ if err != nil {
+ return err
+ }
+ cntAfter, err := countPartition(psAfter[i])
+ if err != nil {
+ return err
+ }
+ pSummaries = append(pSummaries, PartitionSummary{
+ Live: cntBefore.live,
+ Dead: cntBefore.dead,
+ Faulty: cntBefore.faulty,
+ Diff: PartitionDiff{
+ Faulted: cntAfter.faulty - cntBefore.faulty,
+ Recovered: cntBefore.recovering - cntAfter.recovering,
+ Killed: cntAfter.dead - cntBefore.dead,
+ },
+ })
+ }
+
+ // Precommit and vesting table data
+ // Before
+ aBefore, err := n.StateGetActor(ctx, addr, tsBefore.Key())
+ if err != nil {
+ return err
+ }
+ var st miner11.State
+ err = adtStore.Get(ctx, aBefore.Head, &st)
+ if err != nil {
+ return err
+ }
+ expiryQArray, err := adt.AsArray(adtStore, st.PreCommittedSectorsCleanUp, miner11.PrecommitCleanUpAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var sectorsBf bitfield.BitField
+ var accumulator []uint64
+ h := ref.Height
+ if err := expiryQArray.ForEach(&sectorsBf, func(i int64) error {
+ if abi.ChainEpoch(i) > h {
+ return nil
+ }
+ sns, err := sectorsBf.All(abi.MaxSectorNumber)
+ if err != nil {
+ return err
+ }
+ accumulator = append(accumulator, sns...)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ vestingBefore, err := countVestingTable(st.VestingFunds)
+ if err != nil {
+ return err
+ }
+
+ // After
+ aAfter, err := n.StateGetActor(ctx, addr, tsAfter.Key())
+ if err != nil {
+ return err
+ }
+ var stAfter miner11.State
+ err = adtStore.Get(ctx, aAfter.Head, &stAfter)
+ if err != nil {
+ return err
+ }
+
+ vestingAfter, err := countVestingTable(stAfter.VestingFunds)
+ if err != nil {
+ return err
+ }
+
+ dSummaries[j] = DeadlineSummary{
+ Partitions: pSummaries,
+ PreCommitExpiry: PreCommitExpiry{
+ Expired: accumulator,
+ },
+ VestingDiff: VestingDiff{
+ PrevTableSize: vestingBefore,
+ NewTableSize: vestingAfter,
+ },
+ }
+
+ }
+
+ // output partition info
+ if err := json.NewEncoder(os.Stdout).Encode(dSummaries); err != nil {
+ return err
+ }
+ return nil
},
}
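
For reference, `deadline-summary` consumes a json array of deadline references on stdin. A minimal sketch of producing that input, assuming `DeadlineRef` exposes `To` (miner address) and `Height` (epoch) json fields, as the `ref.To`/`ref.Height` accesses above suggest:

```go
package main

import (
	"encoding/json"
	"os"
)

// DeadlineRef mirrors the input decoded by deadline-summary; the json field
// names here are assumptions inferred from the ref.To / ref.Height accesses.
type DeadlineRef struct {
	To     string `json:"To"`
	Height int64  `json:"Height"`
}

func main() {
	refs := []DeadlineRef{{To: "f01234", Height: 3469380}}
	// Pipe the encoded array into the command's stdin.
	if err := json.NewEncoder(os.Stdout).Encode(refs); err != nil {
		panic(err)
	}
}
```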
diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go
index 7ac74de48..23c533c31 100644
--- a/cmd/lotus-shed/election.go
+++ b/cmd/lotus-shed/election.go
@@ -219,7 +219,7 @@ func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainE
brand = bvals[len(bvals)-1]
}
- winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api)
+ winner, err := gen.IsRoundWinner(ctx, round, miner, brand, mbi, api)
if err != nil {
return nil, xerrors.Errorf("failed to check if we win next round: %w", err)
}
diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go
index e02e2a722..5dc048f56 100644
--- a/cmd/lotus-shed/gas-estimation.go
+++ b/cmd/lotus-shed/gas-estimation.go
@@ -16,7 +16,6 @@ import (
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
@@ -100,15 +99,11 @@ var gasTraceCmd = &cli.Command{
return err
}
- dcs := build.DrandConfigSchedule()
- shd := beacon.Schedule{}
- for _, dc := range dcs {
- bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
- if err != nil {
- return xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+ if err != nil {
+ return err
}
+
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
@@ -200,14 +195,9 @@ var replayOfflineCmd = &cli.Command{
return err
}
- dcs := build.DrandConfigSchedule()
- shd := beacon.Schedule{}
- for _, dc := range dcs {
- bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
- if err != nil {
- return xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+ if err != nil {
+ return err
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go
index 24a9a817f..be7d43e05 100644
--- a/cmd/lotus-shed/indexes.go
+++ b/cmd/lotus-shed/indexes.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"database/sql"
"fmt"
"path"
@@ -8,12 +9,18 @@ import (
"strings"
"github.com/mitchellh/go-homedir"
+ "github.com/multiformats/go-varint"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ builtintypes "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -31,6 +38,291 @@ var indexesCmd = &cli.Command{
withCategory("msgindex", backfillMsgIndexCmd),
withCategory("msgindex", pruneMsgIndexCmd),
withCategory("txhash", backfillTxHashCmd),
+ withCategory("events", backfillEventsCmd),
+ },
+}
+
+var backfillEventsCmd = &cli.Command{
+ Name: "backfill-events",
+ Usage: "Backfill the events.db for a number of epochs starting from a specified height",
+ Flags: []cli.Flag{
+ &cli.UintFlag{
+ Name: "from",
+ Value: 0,
+ Usage: "the tipset height to start backfilling from (0 is head of chain)",
+ },
+ &cli.IntFlag{
+ Name: "epochs",
+ Value: 2000,
+ Usage: "the number of epochs to backfill",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ srv, err := lcli.GetFullNodeServices(cctx)
+ if err != nil {
+ return err
+ }
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
+ ctx := lcli.ReqContext(cctx)
+
+ // currTs will be the tipset where we start backfilling from
+ currTs, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+ if cctx.IsSet("from") {
+ // we need to fetch the tipset after the epoch being specified since we will need to advance currTs
+ currTs, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key())
+ if err != nil {
+ return err
+ }
+ }
+
+ // advance currTs by one epoch and maintain prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API)
+ prevTs := currTs
+ currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+ if err != nil {
+ return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err)
+ }
+
+ epochs := cctx.Int("epochs")
+
+ basePath, err := homedir.Expand(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ dbPath := path.Join(basePath, "sqlite", "events.db")
+ db, err := sql.Open("sqlite3", dbPath)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ err := db.Close()
+ if err != nil {
+ fmt.Printf("ERROR: closing db: %s", err)
+ }
+ }()
+
+ addressLookups := make(map[abi.ActorID]address.Address)
+
+ resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) {
+ // we only want to match using f4 addresses
+ idAddr, err := address.NewIDAddress(uint64(emitter))
+ if err != nil {
+ return address.Undef, false
+ }
+
+ actor, err := api.StateGetActor(ctx, idAddr, ts.Key())
+ if err != nil || actor.Address == nil {
+ return address.Undef, false
+ }
+
+ // if robust address is not f4 then we won't match against it so bail early
+ if actor.Address.Protocol() != address.Delegated {
+ return address.Undef, false
+ }
+
+ // we have an f4 address, make sure it's assigned by the EAM
+ if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID {
+ return address.Undef, false
+ }
+ return *actor.Address, true
+ }
+
+ isIndexedValue := func(b uint8) bool {
+ // currently we mark the full entry as indexed if either the key
+ // or the value are indexed; in the future we will need finer-grained
+ // management of indices
+ return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0
+ }
+
+ var totalEventsAffected int64
+ var totalEntriesAffected int64
+
+ processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error {
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ return fmt.Errorf("failed to start transaction: %w", err)
+ }
+ defer tx.Rollback() //nolint:errcheck
+
+ stmtSelectEvent, err := tx.Prepare("SELECT MAX(id) from event WHERE height=? AND tipset_key=? and tipset_key_cid=? and emitter_addr=? and event_index=? and message_cid=? and message_index=? and reverted=false")
+ if err != nil {
+ return err
+ }
+ stmtEvent, err := tx.Prepare("INSERT INTO event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)")
+ if err != nil {
+ return err
+ }
+ stmtEntry, err := tx.Prepare("INSERT INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)")
+ if err != nil {
+ return err
+ }
+
+ var eventsAffected int64
+ var entriesAffected int64
+
+ // loop over each message receipt and backfill the events
+ for idx, receipt := range receipts {
+ msg := msgs[idx]
+
+ if receipt.ExitCode != exitcode.Ok {
+ continue
+ }
+
+ if receipt.EventsRoot == nil {
+ continue
+ }
+
+ events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot)
+ if err != nil {
+ return fmt.Errorf("failed to load events for tipset %s: %w", currTs, err)
+ }
+
+ for eventIdx, event := range events {
+ addr, found := addressLookups[event.Emitter]
+ if !found {
+ var ok bool
+ addr, ok = resolveFn(ctx, event.Emitter, currTs)
+ if !ok {
+ // not an address we will be able to match against
+ continue
+ }
+ addressLookups[event.Emitter] = addr
+ }
+
+ tsKeyCid, err := currTs.Key().Cid()
+ if err != nil {
+ return fmt.Errorf("failed to get tipset key cid: %w", err)
+ }
+
+ // select the highest event id that exists in database, or null if none exists
+ var entryID sql.NullInt64
+ err = stmtSelectEvent.QueryRow(
+ currTs.Height(),
+ currTs.Key().Bytes(),
+ tsKeyCid.Bytes(),
+ addr.Bytes(),
+ eventIdx,
+ msg.Cid.Bytes(),
+ idx,
+ ).Scan(&entryID)
+ if err != nil {
+ return fmt.Errorf("error checking if event exists: %w", err)
+ }
+
+ // we already have this event
+ if entryID.Valid {
+ continue
+ }
+
+ // event does not exist, lets backfill it
+ res, err := tx.Stmt(stmtEvent).Exec(
+ currTs.Height(), // height
+ currTs.Key().Bytes(), // tipset_key
+ tsKeyCid.Bytes(), // tipset_key_cid
+ addr.Bytes(), // emitter_addr
+ eventIdx, // event_index
+ msg.Cid.Bytes(), // message_cid
+ idx, // message_index
+ false, // reverted
+ )
+ if err != nil {
+ return fmt.Errorf("error inserting event: %w", err)
+ }
+
+ entryID.Int64, err = res.LastInsertId()
+ if err != nil {
+ return fmt.Errorf("could not get last insert id: %w", err)
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("could not get rows affected: %w", err)
+ }
+ eventsAffected += rowsAffected
+
+ // backfill the event entries
+ for _, entry := range event.Entries {
+ entryRes, err := tx.Stmt(stmtEntry).Exec(
+ entryID.Int64, // event_id
+ isIndexedValue(entry.Flags), // indexed
+ []byte{entry.Flags}, // flags
+ entry.Key, // key
+ entry.Codec, // codec
+ entry.Value, // value
+ )
+ if err != nil {
+ return fmt.Errorf("error inserting entry: %w", err)
+ }
+
+ rowsAffected, err := entryRes.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("could not get rows affected: %w", err)
+ }
+ entriesAffected += rowsAffected
+ }
+ }
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return fmt.Errorf("failed to commit transaction: %w", err)
+ }
+
+ log.Infof("[%d] backfilling actor events epoch:%d, eventsAffected:%d, entriesAffected:%d", cnt, currTs.Height(), eventsAffected, entriesAffected)
+
+ totalEventsAffected += eventsAffected
+ totalEntriesAffected += entriesAffected
+
+ return nil
+ }
+
+ for i := 0; i < epochs; i++ {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+
+ blockCid := prevTs.Blocks()[0].Cid()
+
+ // get messages for the parent of the previous tipset (which will be currTs)
+ msgs, err := api.ChainGetParentMessages(ctx, blockCid)
+ if err != nil {
+ return fmt.Errorf("failed to get parent messages for block %s: %w", blockCid, err)
+ }
+
+ // get receipts for the parent of the previous tipset (which will be currTs)
+ receipts, err := api.ChainGetParentReceipts(ctx, blockCid)
+ if err != nil {
+ return fmt.Errorf("failed to get parent receipts for block %s: %w", blockCid, err)
+ }
+
+ if len(msgs) != len(receipts) {
+ return fmt.Errorf("mismatched in message and receipt count: %d != %d", len(msgs), len(receipts))
+ }
+
+ err = processHeight(ctx, i, msgs, receipts)
+ if err != nil {
+ return err
+ }
+
+ // advance prevTs and currTs up the chain
+ prevTs = currTs
+ currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+ if err != nil {
+ return fmt.Errorf("failed to load tipset %s: %w", currTs, err)
+ }
+ }
+
+ log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected)
+
+ return nil
},
}
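
The walk above leans on a chain invariant: the parent messages and parent receipts fetched for any block of a tipset were executed while applying that tipset's parent, and the two slices align positionally. A compact sketch of one backfill step under that assumption (`backfillStep` is a hypothetical helper, not part of the command):

```go
import (
	"context"
	"fmt"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// backfillStep shows the pairing used by backfill-events: msgs[i] produced
// receipts[i], and both were executed at prevTs's parent (currTs above).
func backfillStep(ctx context.Context, node lapi.FullNode, prevTs *types.TipSet) error {
	blk := prevTs.Blocks()[0].Cid()
	msgs, err := node.ChainGetParentMessages(ctx, blk)
	if err != nil {
		return err
	}
	receipts, err := node.ChainGetParentReceipts(ctx, blk)
	if err != nil {
		return err
	}
	if len(msgs) != len(receipts) {
		return fmt.Errorf("mismatch between message and receipt count: %d != %d", len(msgs), len(receipts))
	}
	for i := range receipts {
		_ = msgs[i].Cid // identifies the message that produced receipts[i]
	}
	return nil
}
```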
diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go
index 12f52c68f..a5b66a096 100644
--- a/cmd/lotus-shed/main.go
+++ b/cmd/lotus-shed/main.go
@@ -24,6 +24,7 @@ func main() {
local := []*cli.Command{
addressCmd,
statActorCmd,
+ statSnapshotCmd,
statObjCmd,
base64Cmd,
base32Cmd,
@@ -90,6 +91,7 @@ func main() {
indexesCmd,
FevmAnalyticsCmd,
mismatchesCmd,
+ blockCmd,
}
app := &cli.App{
diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go
index 062e077df..35f8eed35 100644
--- a/cmd/lotus-shed/msg.go
+++ b/cmd/lotus-shed/msg.go
@@ -26,6 +26,12 @@ var msgCmd = &cli.Command{
Aliases: []string{"msg"},
Usage: "Translate message between various formats",
ArgsUsage: "Message in any form",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "exec-trace",
+ Usage: "Print the execution trace",
+ },
+ },
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
@@ -36,6 +42,48 @@ var msgCmd = &cli.Command{
return err
}
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ // Get the CID of the message
+ mcid := msg.Cid()
+
+ // Search for the message on-chain
+ lookup, err := api.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return err
+ }
+ if lookup == nil {
+ fmt.Println("Message not found on-chain. Continuing...")
+ } else {
+ // Replay the message to get the execution trace
+ res, err := api.StateReplay(ctx, types.EmptyTSK, mcid)
+ if err != nil {
+ return xerrors.Errorf("replay call failed: %w", err)
+ }
+
+ if cctx.Bool("exec-trace") {
+ // Print the execution trace
+ color.Green("Execution trace:")
+ trace, err := json.MarshalIndent(res.ExecutionTrace, "", " ")
+ if err != nil {
+ return xerrors.Errorf("marshaling execution trace: %w", err)
+ }
+ fmt.Println(string(trace))
+ fmt.Println()
+
+ color.Green("Receipt:")
+ fmt.Printf("Exit code: %d\n", res.MsgRct.ExitCode)
+ fmt.Printf("Return: %x\n", res.MsgRct.Return)
+ fmt.Printf("Gas Used: %d\n", res.MsgRct.GasUsed)
+ }
+ }
+
switch msg := msg.(type) {
case *types.SignedMessage:
return printSignedMessage(cctx, msg)
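
When the message is found on-chain it is replayed via `StateReplay`, and with `--exec-trace` set the command prints the execution trace together with the receipt's exit code, return value and gas used; messages not found on-chain fall through to the existing format translation.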
diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go
index 4aec02091..4eb00f981 100644
--- a/cmd/lotus-shed/state-stats.go
+++ b/cmd/lotus-shed/state-stats.go
@@ -1,11 +1,14 @@
package main
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
"io"
+ "path"
"reflect"
+ "sort"
"sync"
"github.com/docker/go-units"
@@ -21,8 +24,12 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ gstactors "github.com/filecoin-project/go-state-types/actors"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
@@ -49,6 +56,19 @@ type fieldItem struct {
Stats api.ObjStat
}
+type job struct {
+ c cid.Cid
+ key string // prefix path for the region being recorded i.e. "/state/mineractor"
+}
+type cidCall struct {
+ c cid.Cid
+ resp chan bool
+}
+type result struct {
+ key string
+ stats api.ObjStat
+}
+
type cacheNodeGetter struct {
ds format.NodeGetter
cache *lru.TwoQueueCache[cid.Cid, format.Node]
@@ -166,39 +186,13 @@ var statObjCmd = &cli.Command{
return err
}
- r, err := repo.NewFS(cctx.String("repo"))
- if err != nil {
- return xerrors.Errorf("opening fs repo: %w", err)
- }
-
- exists, err := r.Exists()
+ h, err := loadChainStore(ctx, cctx.String("repo"))
if err != nil {
return err
}
- if !exists {
- return xerrors.Errorf("lotus repo doesn't exist")
- }
+ defer h.closer()
- lr, err := r.Lock(repo.FullNode)
- if err != nil {
- return err
- }
- defer lr.Close() //nolint:errcheck
-
- bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
- if err != nil {
- return fmt.Errorf("failed to open blockstore: %w", err)
- }
-
- defer func() {
- if c, ok := bs.(io.Closer); ok {
- if err := c.Close(); err != nil {
- log.Warnf("failed to close blockstore: %s", err)
- }
- }
- }()
-
- dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ dag := merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
dsc := &dagStatCollector{
ds: dag,
walk: carWalkFunc,
@@ -212,6 +206,376 @@ var statObjCmd = &cli.Command{
},
}
+type StoreHandle struct {
+ bs blockstore.Blockstore
+ cs *store.ChainStore
+ sm *stmgr.StateManager
+ closer func()
+}
+
+func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) {
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return nil, xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open blockstore: %w", err)
+ }
+
+ closer := func() {
+ if err := lr.Close(); err != nil {
+ log.Warnf("failed to close locked repo: %s", err)
+ }
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }
+
+ mds, err := lr.Datastore(context.Background(), "/metadata")
+ if err != nil {
+ return nil, err
+ }
+
+ cs := store.NewChainStore(bs, bs, mds, nil, nil)
+ if err := cs.Load(ctx); err != nil {
+ return nil, fmt.Errorf("failed to load chain store: %w", err)
+ }
+
+ tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
+ sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open state manager: %w", err)
+ }
+ handle := StoreHandle{
+ bs: bs,
+ sm: sm,
+ cs: cs,
+ closer: closer,
+ }
+
+ return &handle, nil
+}
+
+func pipeline(ctx context.Context, name string, numWorkers int, createJobs func(ctx context.Context, jobCh chan job, resultCh chan result) error,
+ worker func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error,
+ processResults func(ctx context.Context, resultCh chan result) error) error {
+
+ eg, egctx := errgroup.WithContext(ctx)
+ jobCh := make(chan job, numWorkers)
+ resultCh := make(chan result)
+ var resultWriterWg sync.WaitGroup
+
+ resultWriterWg.Add(1)
+ eg.Go(func() error {
+ defer resultWriterWg.Done()
+ defer func() {
+ close(jobCh)
+ }()
+ return createJobs(ctx, jobCh, resultCh)
+ })
+
+ var id int
+ for w := 0; w < numWorkers; w++ {
+ id = w
+
+ resultWriterWg.Add(1)
+ eg.Go(func() error {
+ defer resultWriterWg.Done()
+ return worker(egctx, id, jobCh, resultCh)
+ })
+ }
+
+ eg.Go(func() error {
+ return processResults(ctx, resultCh)
+ })
+
+ // close result channel when workers are done sending to it.
+ eg.Go(func() error {
+ resultWriterWg.Wait()
+ close(resultCh)
+ return nil
+ })
+
+ if err := eg.Wait(); err != nil {
+ return fmt.Errorf("failed pipeline %s: %w", name, err)
+ }
+ return nil
+}
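+
+// Example (sketch, not part of the commands below): wiring the helper. The
+// producer enqueues jobs, workers map jobs to results, and the collector
+// folds results; someCid and total are hypothetical locals.
+//
+//	err := pipeline(ctx, "example", 4,
+//		func(ctx context.Context, jobCh chan job, _ chan result) error {
+//			jobCh <- job{c: someCid, key: "/example"}
+//			return nil
+//		},
+//		func(ctx context.Context, _ int, jobCh chan job, resultCh chan result) error {
+//			for j := range jobCh {
+//				resultCh <- result{key: j.key, stats: api.ObjStat{Size: 1, Links: 1}}
+//			}
+//			return nil
+//		},
+//		func(ctx context.Context, resultCh chan result) error {
+//			for r := range resultCh {
+//				total[r.key] = r.stats
+//			}
+//			return nil
+//		})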
+
+var statSnapshotCmd = &cli.Command{
+ Name: "stat-snapshot",
+ Usage: "calculates the space usage of a snapshot taken from the given tipset",
+ Description: `Walk the chain back to the lightweight snapshot depth and break down space usage into high-level
+ categories: headers, messages, receipts, latest state root, and churn from earlier state roots.
+ State root and churn space is further broken down by actor type and immediate top-level fields.
+ `,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset to call method on (pass comma separated array of cids)",
+ },
+ &cli.IntFlag{
+ Name: "workers",
+ Usage: "number of workers to use when processing",
+ Value: 10,
+ },
+ &cli.IntFlag{
+ Name: "dag-cache-size",
+ Usage: "cache size per worker (setting to 0 disables)",
+ Value: 8092,
+ },
+ &cli.BoolFlag{
+ Name: "pretty",
+ Usage: "print formated output instead of ldjson",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+ h, err := loadChainStore(ctx, cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+ defer h.closer()
+ tsr := &ChainStoreTipSetResolver{
+ Chain: h.cs,
+ }
+
+ ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
+ if err != nil {
+ return err
+ }
+
+ numWorkers := cctx.Int("workers")
+ dagCacheSize := cctx.Int("dag-cache-size")
+
+ cidCh := make(chan cidCall, numWorkers)
+ summary := make(map[string]api.ObjStat)
+ // snapshot root objects with no additional bytes or links
+ summary["/"] = api.ObjStat{Size: 0, Links: 0}
+ summary["/statetree"] = api.ObjStat{Size: 0, Links: 0}
+
+ combine := func(statsA, statsB api.ObjStat) api.ObjStat {
+ return api.ObjStat{
+ Size: statsA.Size + statsB.Size,
+ Links: statsA.Links + statsB.Links,
+ }
+ }
+
+ // Threadsafe cid set lives across different pipelines so not part of error group
+ go func() {
+ seen := cid.NewSet()
+ for {
+ select {
+ case call := <-cidCh:
+ call.resp <- seen.Visit(call.c)
+ case <-ctx.Done():
+ log.Infof("shutting down cid set goroutine: %s", ctx.Err())
+ return
+ }
+ }
+ }()
+ visit := func(c cid.Cid) bool {
+ ch := make(chan bool)
+ cidCh <- cidCall{c: c, resp: ch}
+ out := <-ch
+ return out
+ }
+ // Stage 1: walk all actors in the latest state root
+ createJobsStage1 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ st, err := h.sm.StateTree(ts.ParentState())
+ if err != nil {
+ return err
+ }
+
+ return st.ForEach(func(_ address.Address, act *types.Actor) error {
+ actType := builtin.ActorNameByCode(act.Code)
+ actType = path.Base(actType) // strip away fil/
+ if actType == "" {
+ actType = act.Code.String()
+ }
+ jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/latest/%s", actType)}
+
+ return nil
+ })
+ }
+
+ worker := func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error {
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+ if dagCacheSize != 0 {
+ var err error
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+ if err != nil {
+ return err
+ }
+ }
+
+ for job := range jobCh {
+ stats, err := collectSnapshotJobStats(ctx, job, dag, visit)
+ if err != nil {
+ return err
+ }
+ for _, stat := range stats {
+ select {
+ case resultCh <- stat:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
+ return nil
+ }
+
+ processResults := func(ctx context.Context, resultCh chan result) error {
+ for result := range resultCh {
+ if stat, ok := summary[result.key]; ok {
+ summary[result.key] = combine(stat, result.stats)
+
+ } else {
+ summary[result.key] = result.stats
+ }
+ }
+ return nil
+ }
+
+ if err := pipeline(ctx, "Latest State Actors", numWorkers, createJobsStage1, worker, processResults); err != nil {
+ return err
+ }
+
+ // Stage 2: walk the top of the latest state root
+ createJobsStage2 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ jobCh <- job{c: ts.ParentState(), key: "/statetree/latest"}
+ return nil
+ }
+
+ if err := pipeline(ctx, "Latest State HAMT", numWorkers, createJobsStage2, worker, processResults); err != nil {
+ return err
+ }
+
+ // Stage 3: walk the rest of the chain: headers, messages, churn.
+ // Ordering: for each header, send jobs for messages, receipts and state
+ // tree churn. Headers are not walked directly, as that would traverse
+ // everything, including parent tipsets.
+
+ churnStateRoots := cid.NewSet()
+ createJobsStage3 := func(ctx context.Context, jobCh chan job, resultCh chan result) error {
+ // walk chain
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+ if dagCacheSize != 0 {
+ var err error
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+ if err != nil {
+ return err
+ }
+ }
+
+ blocksToWalk := ts.Cids()
+ startHeight := ts.Height()
+ snapshotStateLimit := abi.ChainEpoch(2000)
+
+ churnActorCache := cid.NewSet()
+ blocksTracked := cid.NewSet()
+ for len(blocksToWalk) > 0 {
+ blkCid := blocksToWalk[0]
+ blocksToWalk = blocksToWalk[1:]
+ nd, err := dag.Get(ctx, blkCid)
+ if err != nil {
+ return xerrors.Errorf("getting block: %w", err)
+ }
+
+ var b types.BlockHeader
+ if err := b.UnmarshalCBOR(bytes.NewBuffer(nd.RawData())); err != nil {
+ return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blkCid, err)
+ }
+
+ // header directly to result channel
+ resultCh <- result{key: "/headers", stats: api.ObjStat{Size: uint64(len(nd.RawData())), Links: uint64(len(nd.Links()))}}
+ // message job
+ if b.Height > startHeight-snapshotStateLimit {
+ jobCh <- job{key: "/messages", c: b.Messages}
+ }
+
+ // state churn job
+ if b.Height > startHeight-snapshotStateLimit {
+ if churnStateRoots.Visit(b.ParentStateRoot) {
+ st, err := h.sm.StateTree(b.ParentStateRoot)
+ if err != nil {
+ return err
+ }
+
+ err = st.ForEach(func(_ address.Address, act *types.Actor) error {
+ if churnActorCache.Visit(act.Head) {
+ actType := builtin.ActorNameByCode(act.Code)
+ actType = path.Base(actType) // strip away fil/
+ if actType == "" {
+ actType = act.Code.String()
+ }
+ jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/churn/%s", actType)}
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, blkCid := range b.Parents {
+ if blocksTracked.Visit(blkCid) && b.Height != 0 {
+ blocksToWalk = append(blocksToWalk, blkCid)
+ }
+ }
+ }
+ return nil
+ }
+
+ if err := pipeline(ctx, "Churn, Headers, Messages", numWorkers, createJobsStage3, worker, processResults); err != nil {
+ return err
+ }
+
+ // Stage 4: walk all actor HAMTs for churn
+
+ createJobsStage4 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ return churnStateRoots.ForEach(func(c cid.Cid) error {
+ jobCh <- job{c: c, key: "/statetree/churn"}
+ return nil
+ })
+ }
+
+ if err := pipeline(ctx, "Churn HAMT", numWorkers, createJobsStage4, worker, processResults); err != nil {
+ return err
+ }
+
+ if cctx.Bool("pretty") {
+ DumpSnapshotStats(summary)
+ } else {
+ if err := DumpJSON(summary); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ },
+}
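+
+// A hypothetical summary after a run (shape only; the actual keys depend on
+// the actor types encountered, and DumpJSON is assumed to emit the map as json):
+//
+//	{
+//	  "/headers": {"Size": 125829120, "Links": 8640},
+//	  "/messages": {"Size": 1073741824, "Links": 54000},
+//	  "/statetree/latest/storageminer": {"Size": 987654321, "Links": 420000},
+//	  "/statetree/churn": {"Size": 123456789, "Links": 65000}
+//	}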
+
var statActorCmd = &cli.Command{
Name: "stat-actor",
Usage: "calculates the size of actors and their immeidate structures",
@@ -265,57 +629,14 @@ to reduce the number of decode operations performed by caching the decoded objec
addrs = append(addrs, addr)
}
}
-
- r, err := repo.NewFS(cctx.String("repo"))
- if err != nil {
- return xerrors.Errorf("opening fs repo: %w", err)
- }
-
- exists, err := r.Exists()
- if err != nil {
- return err
- }
- if !exists {
- return xerrors.Errorf("lotus repo doesn't exist")
- }
-
- lr, err := r.Lock(repo.FullNode)
- if err != nil {
- return err
- }
- defer lr.Close() //nolint:errcheck
-
- bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
- if err != nil {
- return fmt.Errorf("failed to open blockstore: %w", err)
- }
-
- defer func() {
- if c, ok := bs.(io.Closer); ok {
- if err := c.Close(); err != nil {
- log.Warnf("failed to close blockstore: %s", err)
- }
- }
- }()
-
- mds, err := lr.Datastore(context.Background(), "/metadata")
- if err != nil {
- return err
- }
-
- cs := store.NewChainStore(bs, bs, mds, nil, nil)
- if err := cs.Load(ctx); err != nil {
- return nil
- }
-
- tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
- sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
+ h, err := loadChainStore(ctx, cctx.String("repo"))
if err != nil {
return err
}
+ defer h.closer()
tsr := &ChainStoreTipSetResolver{
- Chain: cs,
+ Chain: h.cs,
}
ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
@@ -327,7 +648,7 @@ to reduce the number of decode operations performed by caching the decoded objec
if len(addrs) == 0 && cctx.Bool("all") {
var err error
- addrs, err = sm.ListAllActors(ctx, ts)
+ addrs, err = h.sm.ListAllActors(ctx, ts)
if err != nil {
return err
}
@@ -354,15 +675,15 @@ to reduce the number of decode operations performed by caching the decoded objec
return nil
}
- actor, err := sm.LoadActor(ctx, addr, ts)
+ actor, err := h.sm.LoadActor(ctx, addr, ts)
if err != nil {
return err
}
- var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
if dagCacheSize != 0 {
var err error
- dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))), dagCacheSize)
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
if err != nil {
return err
}
@@ -427,6 +748,93 @@ to reduce the number of decode operations performed by caching the decoded objec
},
}
+func collectSnapshotJobStats(ctx context.Context, in job, dag format.NodeGetter, visit func(c cid.Cid) bool) ([]result, error) {
+ // "state" and "churn" attempt further breakdown by actor type
+ if path.Dir(in.key) != "/statetree/latest" && path.Dir(in.key) != "/statetree/churn" {
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ return []result{{key: in.key, stats: dsc.stats}}, nil
+ }
+
+ // in.c is an actor head cid, try to unmarshal and create sub keys for different regions of state
+ nd, err := dag.Get(ctx, in.c)
+ if err != nil {
+ return nil, err
+ }
+ subjobs := make([]job, 0)
+ results := make([]result, 0)
+
+ // reconstruct actor for state parsing from key
+ av, err := gstactors.VersionForNetwork(network.Version20)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get actors version for network: %w", err)
+ }
+ code, ok := actors.GetActorCodeID(av, path.Base(in.key))
+ if !ok { // try parsing key directly
+ code, err = cid.Parse(path.Base(in.key))
+ if err != nil {
+ log.Debugf("failing to parse actor string: %s", path.Base(in.key))
+ }
+ }
+
+ actor := types.ActorV5{Head: in.c, Code: code}
+ oif, err := vm.DumpActorState(consensus.NewTipSetExecutor(filcns.RewardFunc).NewActorRegistry(), &actor, nd.RawData())
+ if err != nil {
+ oif = nil
+ }
+ // Account actors return nil from DumpActorState as they have no state
+ if oif != nil {
+ v := reflect.Indirect(reflect.ValueOf(oif))
+ for i := 0; i < v.NumField(); i++ {
+ varName := v.Type().Field(i).Name
+ varType := v.Type().Field(i).Type
+ varValue := v.Field(i).Interface()
+
+ if varType == reflect.TypeOf(cid.Cid{}) {
+ subjobs = append(subjobs, job{
+ key: fmt.Sprintf("%s/%s", in.key, varName),
+ c: varValue.(cid.Cid),
+ })
+ }
+ }
+ }
+
+ // Walk subfields
+ for _, job := range subjobs {
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, job.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ var res result
+ res.key = job.key
+ res.stats = dsc.stats
+
+ results = append(results, res)
+ }
+
+ // now walk the top level object of actor state
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ results = append(results, result{key: in.key, stats: dsc.stats})
+ return results, nil
+}
+
func collectStats(ctx context.Context, addr address.Address, actor *types.Actor, dag format.NodeGetter) (actorStats, error) {
log.Infow("actor", "addr", addr, "code", actor.Code, "name", builtin.ActorNameByCode(actor.Code))
@@ -532,3 +940,19 @@ func DumpStats(actStats actorStats) {
fmt.Println("--------------------------------------------------------------------------")
}
+
+func DumpSnapshotStats(stats map[string]api.ObjStat) {
+ // sort keys so we get subkey locality
+ keys := make([]string, 0, len(stats))
+ for k := range stats {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ fmt.Printf("%-*s%-*s%-*s\n", 32, "Path", 24, "Size", 24, "\"Blocks\"")
+ for _, k := range keys {
+ stat := stats[k]
+ sizeStr := units.BytesSize(float64(stat.Size))
+ fmt.Printf("%-*s%-*s%-*s%-*d\n", 32, k, 10, sizeStr, 14, fmt.Sprintf("(%d)", stat.Size), 24, stat.Links)
+ }
+}
diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go
index f429c4e64..16dfc5935 100644
--- a/cmd/lotus-shed/stateroot-stats.go
+++ b/cmd/lotus-shed/stateroot-stats.go
@@ -197,7 +197,7 @@ var staterootStatCmd = &cli.Command{
return err
}
- fmt.Printf("%s\t%s\t%d\n", inf.Addr, string(cmh.Digest), inf.Stat.Size)
+ fmt.Printf("%s\t%x\t%d\n", inf.Addr, cmh.Digest, inf.Stat.Size)
}
return nil
},
diff --git a/cmd/lotus-worker/main.go b/cmd/lotus-worker/main.go
index 944791275..873dada47 100644
--- a/cmd/lotus-worker/main.go
+++ b/cmd/lotus-worker/main.go
@@ -8,12 +8,14 @@ import (
"net/http"
"os"
"path/filepath"
+ "reflect"
"strings"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log/v2"
+ "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats/view"
@@ -320,6 +322,29 @@ var runCmd = &cli.Command{
}
}
+ // Check DC-environment variable
+ sectorSizes := []string{"2KiB", "8MiB", "512MiB", "32GiB", "64GiB"}
+ resourcesType := reflect.TypeOf(storiface.Resources{})
+
+ for _, sectorSize := range sectorSizes {
+ for i := 0; i < resourcesType.NumField(); i++ {
+ field := resourcesType.Field(i)
+ envName := field.Tag.Get("envname")
+ if envName != "" {
+ // Check if DC_[SectorSize]_[ResourceRestriction] is set
+ envVar, ok := os.LookupEnv("DC_" + sectorSize + "_" + envName)
+ if ok {
+ // If it is set, convert it to DC_[ResourceRestriction]
+ err := os.Setenv("DC_"+envName, envVar)
+ if err != nil {
+ log.Fatalf("Error setting environment variable: %v", err)
+ }
+ log.Warnf("Converted DC_%s_%s to DC_%s, because DC is a sector-size independent job", sectorSize, envName, envName)
+ }
+ }
+ }
+ }
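+
+ // For instance, assuming MAX_CONCURRENT is one of the envname tags on
+ // storiface.Resources: DC_32GiB_MAX_CONCURRENT=1 would be re-exported as
+ // DC_MAX_CONCURRENT=1 before the worker reads its resource limits.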
+
// Connect to storage-miner
ctx := lcli.ReqContext(cctx)
@@ -530,9 +555,14 @@ var runCmd = &cli.Command{
log.Info("Opening local storage; connecting to master")
const unspecifiedAddress = "0.0.0.0"
+
address := cctx.String("listen")
- addressSlice := strings.Split(address, ":")
- if ip := net.ParseIP(addressSlice[0]); ip != nil {
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return err
+ }
+
+ if ip := net.ParseIP(host); ip != nil {
if ip.String() == unspecifiedAddress {
timeout, err := time.ParseDuration(cctx.String("timeout"))
if err != nil {
@@ -542,11 +572,21 @@ var runCmd = &cli.Command{
if err != nil {
return err
}
- address = rip + ":" + addressSlice[1]
+ host = rip
}
}
- localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + address + "/remote"})
+ var newAddress string
+
+ // Check if the IP address is IPv6
+ ip := net.ParseIP(host)
+ if ip.To4() == nil && ip.To16() != nil {
+ newAddress = "[" + host + "]:" + port
+ } else {
+ newAddress = host + ":" + port
+ }
+
+ localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + newAddress + "/remote"})
if err != nil {
return err
}
@@ -587,7 +627,7 @@ var runCmd = &cli.Command{
Storage: lr,
}
- log.Info("Setting up control endpoint at " + address)
+ log.Info("Setting up control endpoint at " + newAddress)
timeout, err := time.ParseDuration(cctx.String("http-server-timeout"))
if err != nil {
@@ -612,13 +652,13 @@ var runCmd = &cli.Command{
log.Warn("Graceful shutdown successful")
}()
- nl, err := net.Listen("tcp", address)
+ nl, err := net.Listen("tcp", newAddress)
if err != nil {
return err
}
{
- a, err := net.ResolveTCPAddr("tcp", address)
+ a, err := net.ResolveTCPAddr("tcp", newAddress)
if err != nil {
return xerrors.Errorf("parsing address: %w", err)
}
@@ -699,7 +739,7 @@ var runCmd = &cli.Command{
select {
case <-readyCh:
- if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil {
+ if err := nodeApi.WorkerConnect(ctx, "http://"+newAddress+"/rpc/v0"); err != nil {
log.Errorf("Registering worker failed: %+v", err)
cancel()
return
@@ -740,21 +780,46 @@ func extractRoutableIP(timeout time.Duration) (string, error) {
deprecatedMinerMultiAddrKey := "STORAGE_API_INFO"
env, ok := os.LookupEnv(minerMultiAddrKey)
if !ok {
- // TODO remove after deprecation period
_, ok = os.LookupEnv(deprecatedMinerMultiAddrKey)
if ok {
log.Warnf("Using a deprecated env(%s) value, please use env(%s) instead.", deprecatedMinerMultiAddrKey, minerMultiAddrKey)
}
return "", xerrors.New("MINER_API_INFO environment variable required to extract IP")
}
- minerAddr := strings.Split(env, "/")
- conn, err := net.DialTimeout("tcp", minerAddr[2]+":"+minerAddr[4], timeout)
+
+ // Splitting the env to separate the JWT from the multiaddress
+ splitEnv := strings.SplitN(env, ":", 2)
+ if len(splitEnv) < 2 {
+ return "", xerrors.Errorf("invalid MINER_API_INFO format")
+ }
+ // Only take the multiaddress part
+ maddrStr := splitEnv[1]
+
+ maddr, err := multiaddr.NewMultiaddr(maddrStr)
if err != nil {
return "", err
}
- defer conn.Close() //nolint:errcheck
+
+ minerIP, _ := maddr.ValueForProtocol(multiaddr.P_IP6)
+ if minerIP == "" {
+ minerIP, _ = maddr.ValueForProtocol(multiaddr.P_IP4)
+ }
+ minerPort, _ := maddr.ValueForProtocol(multiaddr.P_TCP)
+
+ // Format the address appropriately
+ addressToDial := net.JoinHostPort(minerIP, minerPort)
+
+ conn, err := net.DialTimeout("tcp", addressToDial, timeout)
+ if err != nil {
+ return "", err
+ }
+
+ defer func() {
+ if cerr := conn.Close(); cerr != nil {
+ log.Errorf("Error closing connection: %v", cerr)
+ }
+ }()
localAddr := conn.LocalAddr().(*net.TCPAddr)
-
- return strings.Split(localAddr.IP.String(), ":")[0], nil
+ return localAddr.IP.String(), nil
}
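
For illustration, the new parsing reduces a `MINER_API_INFO` value of the form `<jwt>:<multiaddr>` to a dialable `host:port`; the concrete token below is hypothetical:

```go
package main

import (
	"fmt"
	"net"
	"strings"

	"github.com/multiformats/go-multiaddr"
)

func main() {
	// Hypothetical MINER_API_INFO value: JWT, then the miner multiaddress.
	env := "eyJhbGciOiJIUzI1NiJ9.payload.sig:/ip4/10.0.0.5/tcp/2345/http"

	maddrStr := strings.SplitN(env, ":", 2)[1]
	maddr, err := multiaddr.NewMultiaddr(maddrStr)
	if err != nil {
		panic(err)
	}

	ip, _ := maddr.ValueForProtocol(multiaddr.P_IP4) // the real code falls back from P_IP6
	port, _ := maddr.ValueForProtocol(multiaddr.P_TCP)
	fmt.Println(net.JoinHostPort(ip, port)) // 10.0.0.5:2345
}
```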
diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go
index fbb9dfd9a..7271a6e53 100644
--- a/cmd/lotus/daemon.go
+++ b/cmd/lotus/daemon.go
@@ -12,6 +12,7 @@ import (
"io"
"os"
"path"
+ "path/filepath"
"runtime/pprof"
"strings"
@@ -32,6 +33,7 @@ import (
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
@@ -119,6 +121,10 @@ var DaemonCmd = &cli.Command{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
+ &cli.BoolFlag{
+ Name: "remove-existing-chain",
+ Usage: "remove existing chain and splitstore data on a snapshot-import",
+ },
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
@@ -263,6 +269,26 @@ var DaemonCmd = &cli.Command{
}
}
+ if cctx.Bool("remove-existing-chain") {
+ lr, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("error opening fs repo: %w", err)
+ }
+
+ exists, err := lr.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ err = removeExistingChain(cctx, lr)
+ if err != nil {
+ return err
+ }
+ }
+
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
@@ -380,7 +406,6 @@ var DaemonCmd = &cli.Command{
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
-
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
@@ -536,13 +561,17 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return err
}
- // TODO: We need to supply the actual beacon after v14
- stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
- if err != nil {
- return err
- }
-
if !snapshot {
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
+ if err != nil {
+ return xerrors.Errorf("failed to construct beacon schedule: %w", err)
+ }
+
+ stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
+ if err != nil {
+ return err
+ }
+
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
@@ -574,3 +603,59 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return nil
}
+
+func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
+ lockedRepo, err := lr.Lock(repo.FullNode)
+ if err != nil {
+ return xerrors.Errorf("error locking repo: %w", err)
+ }
+ // Ensure that lockedRepo is closed when this function exits
+ defer func() {
+ if closeErr := lockedRepo.Close(); closeErr != nil {
+ log.Errorf("Error closing the lockedRepo: %v", closeErr)
+ }
+ }()
+
+ cfg, err := lockedRepo.Config()
+ if err != nil {
+ return xerrors.Errorf("error getting config: %w", err)
+ }
+
+ fullNodeConfig, ok := cfg.(*config.FullNode)
+ if !ok {
+ return xerrors.Errorf("wrong config type: %T", cfg)
+ }
+
+ if fullNodeConfig.Chainstore.EnableSplitstore {
+ log.Info("removing splitstore directory...")
+ err = deleteSplitstoreDir(lockedRepo)
+ if err != nil {
+ return xerrors.Errorf("error removing splitstore directory: %w", err)
+ }
+ }
+
+ // Get the base repo path
+ repoPath := lockedRepo.Path()
+
+ // Construct the path to the chain directory
+ chainPath := filepath.Join(repoPath, "datastore", "chain")
+
+ log.Info("removing chain directory:", chainPath)
+
+ err = os.RemoveAll(chainPath)
+ if err != nil {
+ return xerrors.Errorf("error removing chain directory: %w", err)
+ }
+
+ log.Info("chain and splitstore data have been removed")
+ return nil
+}
+
+func deleteSplitstoreDir(lr repo.LockedRepo) error {
+ path, err := lr.SplitstorePath()
+ if err != nil {
+ return xerrors.Errorf("error getting splitstore path: %w", err)
+ }
+
+ return os.RemoveAll(path)
+}
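
In practice the new flag pairs with a snapshot import, e.g. `lotus daemon --remove-existing-chain --import-snapshot <file-or-url>`: the chain datastore directory, and the splitstore directory when splitstore is enabled, are removed before the import starts.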
diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md
index f6e460a54..742f3de8e 100644
--- a/documentation/en/api-v0-methods.md
+++ b/documentation/en/api-v0-methods.md
@@ -5467,7 +5467,8 @@ Response:
"UpgradeLightningHeight": 10101,
"UpgradeThunderHeight": 10101,
"UpgradeWatermelonHeight": 10101
- }
+ },
+ "Eip155ChainID": 123
}
```
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md
index 31e4075b6..e2c249395 100644
--- a/documentation/en/api-v1-unstable-methods.md
+++ b/documentation/en/api-v1-unstable-methods.md
@@ -104,6 +104,8 @@
* [EthSendRawTransaction](#EthSendRawTransaction)
* [EthSubscribe](#EthSubscribe)
* [EthSyncing](#EthSyncing)
+ * [EthTraceBlock](#EthTraceBlock)
+ * [EthTraceReplayBlockTransactions](#EthTraceReplayBlockTransactions)
* [EthUninstallFilter](#EthUninstallFilter)
* [EthUnsubscribe](#EthUnsubscribe)
* [Filecoin](#Filecoin)
@@ -3083,6 +3085,99 @@ Inputs: `null`
Response: `false`
+### EthTraceBlock
+TraceAPI related methods
+
+Returns traces created at given block
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response:
+```json
+[
+ {
+ "action": {
+ "callType": "string value",
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "input": "0x07",
+ "value": "0x0"
+ },
+ "result": {
+ "gasUsed": "0x5",
+ "output": "0x07"
+ },
+ "subtraces": 123,
+ "traceAddress": [
+ 123
+ ],
+ "Type": "string value",
+ "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "blockNumber": 9,
+ "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "transactionPosition": 123
+ }
+]
+```
+
+### EthTraceReplayBlockTransactions
+Replays all transactions in a block returning the requested traces for each transaction
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value",
+ [
+ "string value"
+ ]
+]
+```
+
+Response:
+```json
+[
+ {
+ "output": "0x07",
+ "stateDiff": "string value",
+ "trace": [
+ {
+ "action": {
+ "callType": "string value",
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "input": "0x07",
+ "value": "0x0"
+ },
+ "result": {
+ "gasUsed": "0x5",
+ "output": "0x07"
+ },
+ "subtraces": 123,
+ "traceAddress": [
+ 123
+ ],
+ "Type": "string value"
+ }
+ ],
+ "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "vmTrace": "string value"
+ }
+]
+```
+
### EthUninstallFilter
Uninstalls a filter with given id.
@@ -6981,7 +7076,8 @@ Response:
"UpgradeLightningHeight": 10101,
"UpgradeThunderHeight": 10101,
"UpgradeWatermelonHeight": 10101
- }
+ },
+ "Eip155ChainID": 123
}
```
diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md
index 3cc8c77ec..1614e4d88 100644
--- a/documentation/en/cli-lotus-miner.md
+++ b/documentation/en/cli-lotus-miner.md
@@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
- 1.24.0
+ 1.25.0
COMMANDS:
init Initialize a lotus miner repo
diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md
index fd9508472..a47879738 100644
--- a/documentation/en/cli-lotus-worker.md
+++ b/documentation/en/cli-lotus-worker.md
@@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
- 1.24.0
+ 1.25.0
COMMANDS:
run Start lotus worker
diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md
index c6b0d6f44..46635abcf 100644
--- a/documentation/en/cli-lotus.md
+++ b/documentation/en/cli-lotus.md
@@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
- 1.24.0
+ 1.25.0
COMMANDS:
daemon Start a lotus daemon process
@@ -65,6 +65,7 @@ OPTIONS:
--bootstrap (default: true)
--import-chain value on first run, load chain from given file or url and validate
--import-snapshot value import chain state from a given chain export file or url
+ --remove-existing-chain remove existing chain and splitstore data on a snapshot-import (default: false)
--halt-after-import halt the process after importing chain from file (default: false)
--lite start lotus in lite mode (default: false)
--pprof value specify name of file for writing cpu profile to
diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
index 9ae46fe57..116c615d3 100644
--- a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
+++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
@@ -2,28 +2,25 @@
# Lotus X.Y.Z Release
-
-## What will be in the release
-
+[//]: # (Open this issue as [WIP] Lotus vX.Y.Z)
+[//]: # (Apply the `tpm` label to it, and pin the issue on GitHub)
## 🚢 Estimated shipping date
-## 🔦 Highlights
-
-< See Changelog>
-
## ✅ Release Checklist
-**Note for whomever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
+**Note for whoever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
First steps:
-
+ - [ ] FFI: Fork a new branch (`release/lotus-vX.Y.Z`) from the filecoin-ffi `master` branch
+ - [ ] FFI: Tag the head of `release/lotus-vX.Y.Z` as `vX.Y.Z-pre1`
+ - [ ] Open and land a PR in lotus `master` that updates the FFI dependency to `vX.Y.Z-pre1` as cut in the previous step
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
- [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release). Run make gen and make docsgen-cli before committing changes
-Prepping an RC:
+**Prepping an RC**:
- [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch)
- [ ] run `make gen && make docsgen-cli`
@@ -32,7 +29,7 @@ Prepping an RC:
- [ ] tag commit with `vX.Y.Z-rcN`
- [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true)
-Testing an RC:
+**Testing an RC**:
- [ ] **Stage 0 - Automated Testing**
- Automated Testing
@@ -69,7 +66,7 @@ Testing an RC:
- [ ] Update the [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) to the state that can be used as release note.
- [ ] Invite the wider community through (link to the release issue)
-- [ ] **Stage 4 - Stable Release**
+**Stable Release**
- [ ] Final preparation
- [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
- [ ] Verify that codegen is up to date (`make gen && make docsgen-cli`)
@@ -79,7 +76,7 @@ Testing an RC:
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=false&target=releases).
-- [ ] **Post-Release**
+**Post-Release**
- [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so!
- [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration.
- [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release.
diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi
index fa64b5537..441fa8e61 160000
--- a/extern/filecoin-ffi
+++ b/extern/filecoin-ffi
@@ -1 +1 @@
-Subproject commit fa64b5537320dbdcf8456bb6ca9e82adb07b7747
+Subproject commit 441fa8e61189dc32c2960c1f8d8ba56269f20366
diff --git a/gateway/node.go b/gateway/node.go
index 811cc79d3..367e645c1 100644
--- a/gateway/node.go
+++ b/gateway/node.go
@@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
@@ -77,6 +78,11 @@ type TargetAPI interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
@@ -138,6 +144,8 @@ type TargetAPI interface {
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
var _ TargetAPI = *new(api.FullNode) // gateway depends on latest
diff --git a/gateway/proxy_eth.go b/gateway/proxy_eth.go
index a07ead16c..e6d433a17 100644
--- a/gateway/proxy_eth.go
+++ b/gateway/proxy_eth.go
@@ -16,18 +16,11 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/events/filter"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
)
-func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
- if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
- return "", err
- }
-
- return gw.target.Web3ClientVersion(ctx)
-}
-
func (gw *Node) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) {
// gateway provides public API, so it can't hold user accounts
return []ethtypes.EthAddress{}, nil
@@ -427,7 +420,7 @@ func (gw *Node) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID
ft.lk.Unlock()
if !ok {
- return nil, nil
+ return nil, filter.ErrFilterNotFound
}
return gw.target.EthGetFilterChanges(ctx, id)
@@ -581,6 +574,38 @@ func (gw *Node) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionI
return ok, nil
}
+func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
+ if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
+ return "", err
+ }
+
+ return gw.target.Web3ClientVersion(ctx)
+}
+
+func (gw *Node) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+
+ if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+ return nil, err
+ }
+
+ return gw.target.EthTraceBlock(ctx, blkNum)
+}
+
+func (gw *Node) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+
+ if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+ return nil, err
+ }
+
+ return gw.target.EthTraceReplayBlockTransactions(ctx, blkNum, traceTypes)
+}
+
var EthMaxFiltersPerConn = 16 // todo make this configurable
func addUserFilterLimited(ctx context.Context, cb func() (ethtypes.EthFilterID, error)) (ethtypes.EthFilterID, error) {
diff --git a/gateway/proxy_fil.go b/gateway/proxy_fil.go
index abd5371fe..eb8a354ed 100644
--- a/gateway/proxy_fil.go
+++ b/gateway/proxy_fil.go
@@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
@@ -579,3 +580,53 @@ func (gw *Node) WalletBalance(ctx context.Context, k address.Address) (types.Big
}
return gw.target.WalletBalance(ctx, k)
}
+
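+// The verifreg proxies below mirror the other gateway state methods: rate-limit the call and validate the tipset key lookback before forwarding to the target node.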
+func (gw *Node) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocationForPendingDeal(ctx, dealId, tsk)
+}
+
+func (gw *Node) StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocation(ctx, clientAddr, allocationId, tsk)
+}
+
+func (gw *Node) StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocations(ctx, clientAddr, tsk)
+}
+
+func (gw *Node) StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetClaim(ctx, providerAddr, claimId, tsk)
+}
+
+func (gw *Node) StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetClaims(ctx, providerAddr, tsk)
+}
diff --git a/go.mod b/go.mod
index 75be44575..01b9d1586 100644
--- a/go.mod
+++ b/go.mod
@@ -59,7 +59,7 @@ require (
github.com/filecoin-project/specs-actors/v6 v6.0.2
github.com/filecoin-project/specs-actors/v7 v7.0.1
github.com/filecoin-project/specs-actors/v8 v8.0.1
- github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f
+ github.com/filecoin-project/test-vectors/schema v0.0.7
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
github.com/go-openapi/spec v0.19.11
@@ -90,6 +90,7 @@ require (
github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-graphsync v0.14.6
github.com/ipfs/go-ipfs-blocksutil v0.0.1
+ github.com/ipfs/go-ipfs-exchange-offline v0.3.0
github.com/ipfs/go-ipld-cbor v0.0.6
github.com/ipfs/go-ipld-format v0.5.0
github.com/ipfs/go-log/v2 v2.5.1
@@ -124,6 +125,7 @@ require (
github.com/multiformats/go-multiaddr v0.11.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.2.0
+ github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
@@ -279,7 +281,6 @@ require (
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
- github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/nikkolasg/hexjson v0.1.0 // indirect
github.com/nkovacs/streamquote v1.0.0 // indirect
@@ -295,7 +296,7 @@ require (
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
- github.com/quic-go/qtls-go1-20 v0.3.2 // indirect
+ github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
github.com/quic-go/quic-go v0.37.6 // indirect
github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
diff --git a/go.sum b/go.sum
index 1ea3ac44c..5ec12dd27 100644
--- a/go.sum
+++ b/go.sum
@@ -370,8 +370,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt
github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk=
github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y=
github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA=
-github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f h1:Ho3kK/WetJ7wco2VhR/pOZ9HD/WWL1BDEzYRTFQK8dw=
-github.com/filecoin-project/test-vectors/schema v0.0.6-0.20230822140104-bed37e1ca04f/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
+github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A=
+github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -746,6 +746,7 @@ github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSO
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ=
@@ -1453,8 +1454,8 @@ github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Ez
github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI=
-github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
+github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM=
+github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY=
github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU=
github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
@@ -2264,6 +2265,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
diff --git a/itests/api_test.go b/itests/api_test.go
index ff303df3e..c87012cfe 100644
--- a/itests/api_test.go
+++ b/itests/api_test.go
@@ -28,12 +28,17 @@ func TestAPI(t *testing.T) {
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
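+ // testMiningReal needs real proofs, so run it on its own (directly and through RPC) now that runAPITest below uses mock proofs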
+ ts := apiSuite{}
+ t.Run("testMiningReal", ts.testMiningReal)
+ ts.opts = append(ts.opts, kit.ThroughRPC())
+ t.Run("testMiningReal", ts.testMiningReal)
+
//stm: @CHAIN_STATE_MINER_INFO_001
t.Run("direct", func(t *testing.T) {
- runAPITest(t)
+ runAPITest(t, kit.MockProofs())
})
t.Run("rpc", func(t *testing.T) {
- runAPITest(t, kit.ThroughRPC())
+ runAPITest(t, kit.MockProofs(), kit.ThroughRPC())
})
}
@@ -49,7 +54,6 @@ func runAPITest(t *testing.T, opts ...interface{}) {
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
- t.Run("testMiningReal", ts.testMiningReal)
t.Run("testSlowNotify", ts.testSlowNotify)
t.Run("testSearchMsg", ts.testSearchMsg)
t.Run("testOutOfGasError", ts.testOutOfGasError)
diff --git a/itests/deals_test.go b/itests/deals_test.go
index e8296ea87..a6953d07e 100644
--- a/itests/deals_test.go
+++ b/itests/deals_test.go
@@ -22,7 +22,7 @@ func TestDealsWithSealingAndRPC(t *testing.T) {
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
- ens.InterconnectAll().BeginMining(250 * time.Millisecond)
+ ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
t.Run("stdretrieval", func(t *testing.T) {
diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go
index 3c131a256..6d60f6786 100644
--- a/itests/eth_transactions_test.go
+++ b/itests/eth_transactions_test.go
@@ -325,13 +325,23 @@ func TestGetBlockByNumber(t *testing.T) {
afterNullHeight := hc[0].Val.Height()
+ nullHeight := afterNullHeight - 1
+ for nullHeight > 0 {
+ ts, err := client.ChainGetTipSetByHeight(ctx, nullHeight, types.EmptyTSK)
+ require.NoError(t, err)
+ if ts.Height() == nullHeight {
+ nullHeight--
+ } else {
+ break
+ }
+ }
+
// Fail when trying to fetch a null round.
- _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(afterNullHeight - 1)).Hex(), true)
+ _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(nullHeight)).Hex(), true)
require.Error(t, err)
// Fetch balance on a null round; should not fail and should return previous balance.
- // Should be lower than original balance.
- bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(afterNullHeight-1)))
+ bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(nullHeight)))
require.NoError(t, err)
require.NotEqual(t, big.Zero(), bal)
require.Equal(t, types.FromFil(10).Int, bal.Int)
diff --git a/itests/kit/deals.go b/itests/kit/deals.go
index 84e74124b..eb6b58667 100644
--- a/itests/kit/deals.go
+++ b/itests/kit/deals.go
@@ -87,6 +87,15 @@ func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market
//
// TODO: convert input parameters to struct, and add size as an input param.
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
+ deal, res, path = dh.StartRandomDeal(ctx, params)
+
+ fmt.Printf("WAIT DEAL SEALEDS START\n")
+ dh.WaitDealSealed(ctx, deal, false, false, nil)
+ fmt.Printf("WAIT DEAL SEALEDS END\n")
+ return deal, res, path
+}
+
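+// StartRandomDeal imports deal data and starts a storage deal, returning immediately without waiting for the deal to be sealed.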
+func (dh *DealHarness) StartRandomDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
if params.UseCARFileForStorageDeal {
res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200)
} else {
@@ -107,11 +116,6 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
dp.FastRetrieval = params.FastRet
deal = dh.StartDeal(ctx, dp)
- // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
- time.Sleep(time.Second)
- fmt.Printf("WAIT DEAL SEALEDS START\n")
- dh.WaitDealSealed(ctx, deal, false, false, nil)
- fmt.Printf("WAIT DEAL SEALEDS END\n")
return deal, res, path
}
diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go
index e55a66743..45f21786d 100644
--- a/itests/kit/ensemble.go
+++ b/itests/kit/ensemble.go
@@ -169,6 +169,8 @@ func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
require.NoError(t, build.UseNetworkBundle("testing"))
}
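+ // zero the equivocation-catch delay (see miner.mineOne) so test ensembles don't pause before producing each block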
+ build.EquivocationDelaySecs = 0
+
return n
}
diff --git a/itests/kit/log.go b/itests/kit/log.go
index 0da9adfeb..0c66427f9 100644
--- a/itests/kit/log.go
+++ b/itests/kit/log.go
@@ -21,6 +21,7 @@ func QuietMiningLogs() {
_ = logging.SetLogLevel("pubsub", "ERROR")
_ = logging.SetLogLevel("gen", "ERROR")
_ = logging.SetLogLevel("rpc", "ERROR")
+ _ = logging.SetLogLevel("consensus-common", "ERROR")
_ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}
diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go
index 5d418c5be..6469c0a30 100644
--- a/itests/kit/node_opts.go
+++ b/itests/kit/node_opts.go
@@ -197,7 +197,7 @@ func OwnerAddr(wk *key.Key) NodeOpt {
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
return func(opts *nodeOpts) error {
- opts.extraNodeOpts = extra
+ opts.extraNodeOpts = append(opts.extraNodeOpts, extra...)
return nil
}
}
@@ -290,6 +290,13 @@ func SplitstoreMessges() NodeOpt {
})
}
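+// SplitstoreDisable returns a NodeOpt that turns the splitstore off entirely.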
+func SplitstoreDisable() NodeOpt {
+ return WithCfgOpt(func(cfg *config.FullNode) error {
+ cfg.Chainstore.EnableSplitstore = false
+ return nil
+ })
+}
+
func WithEthRPC() NodeOpt {
return WithCfgOpt(func(cfg *config.FullNode) error {
cfg.Fevm.EnableEthRPC = true
diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go
index cb5fd85c9..807ab3c03 100644
--- a/itests/msgindex_test.go
+++ b/itests/msgindex_test.go
@@ -93,7 +93,7 @@ func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore
// copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there
ctx := context.Background()
- full, _, ens := kit.EnsembleMinimal(t, kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))
+ full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))
senderAddr, err := full.WalletDefaultAddress(ctx)
require.NoError(t, err)
diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go
index e942d6c71..ffe50c72b 100644
--- a/itests/verifreg_test.go
+++ b/itests/verifreg_test.go
@@ -9,16 +9,20 @@ import (
"testing"
"time"
+ "github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/builtin"
+ datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
verifregst "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/datacap"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
@@ -225,36 +229,8 @@ func TestRemoveDataCap(t *testing.T) {
// make the 2 verifiers
- makeVerifier := func(addr address.Address) error {
- allowance := big.NewInt(100000000000)
- params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
- require.NoError(t, aerr)
-
- msg := &types.Message{
- From: rootAddr,
- To: verifreg.Address,
- Method: verifreg.Methods.AddVerifier,
- Params: params,
- Value: big.Zero(),
- }
-
- sm, err := api.MpoolPushMessage(ctx, msg, nil)
- require.NoError(t, err, "AddVerifier failed")
-
- //stm: @CHAIN_STATE_WAIT_MSG_001
- res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
- require.NoError(t, err)
- require.EqualValues(t, 0, res.Receipt.ExitCode)
-
- verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
- require.NoError(t, err)
- require.Equal(t, allowance, *verifierAllowance)
-
- return nil
- }
-
- require.NoError(t, makeVerifier(verifier1Addr))
- require.NoError(t, makeVerifier(verifier2Addr))
+ makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+ makeVerifier(ctx, t, api, rootAddr, verifier2Addr)
// assign datacap to a client
datacap := big.NewInt(10000)
@@ -374,3 +350,156 @@ func TestRemoveDataCap(t *testing.T) {
require.NoError(t, err)
require.Nil(t, dcap, "expected datacap to be nil")
}
+
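+// TestVerifiedClientCanCreateAllocation grants datacap to a verified client, has the client transfer it to the verified registry to create an allocation, and checks that the datacap is spent and the allocation exists.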
+func TestVerifiedClientCanCreateAllocation(t *testing.T) {
+ blockTime := 100 * time.Millisecond
+
+ rootKey, err := key.GenerateKey(types.KTSecp256k1)
+ require.NoError(t, err)
+
+ verifier1Key, err := key.GenerateKey(types.KTSecp256k1)
+ require.NoError(t, err)
+
+ verifiedClientKey, err := key.GenerateKey(types.KTBLS)
+ require.NoError(t, err)
+
+ bal, err := types.ParseFIL("100fil")
+ require.NoError(t, err)
+
+ node, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
+ kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
+ kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())),
+ kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())),
+ )
+
+ ens.InterconnectAll().BeginMining(blockTime)
+
+ api := node.FullNode.(*impl.FullNodeAPI)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // get VRH
+ vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
+ fmt.Println(vrh.String())
+ require.NoError(t, err)
+
+ // import the root key.
+ rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
+ require.NoError(t, err)
+
+ // import the verifiers' keys.
+ verifier1Addr, err := api.WalletImport(ctx, &verifier1Key.KeyInfo)
+ require.NoError(t, err)
+
+ // import the verified client's key.
+ verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
+ require.NoError(t, err)
+
+ // resolve all keys
+
+ // make the 2 verifiers
+
+ makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+
+ // assign datacap to a client
+ initialDatacap := big.NewInt(10000)
+
+ params, err := actors.SerializeParams(&verifregst.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap})
+ require.NoError(t, err)
+
+ msg := &types.Message{
+ From: verifier1Addr,
+ To: verifreg.Address,
+ Method: verifreg.Methods.AddVerifiedClient,
+ Params: params,
+ Value: big.Zero(),
+ }
+
+ sm, err := api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err)
+
+ res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ // check datacap balance
+ dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Equal(t, *dcap, initialDatacap)
+
+ minerId, err := address.IDFromAddress(miner.ActorAddr)
+ require.NoError(t, err)
+
+ allocationRequest := verifregst.AllocationRequest{
+ Provider: abi.ActorID(minerId),
+ Data: cid.MustParse("bafkqaaa"),
+ Size: abi.PaddedPieceSize(initialDatacap.Uint64()),
+ TermMin: verifregst.MinimumVerifiedAllocationTerm,
+ TermMax: verifregst.MinimumVerifiedAllocationTerm,
+ Expiration: verifregst.MaximumVerifiedAllocationExpiration,
+ }
+
+ allocationRequests := verifregst.AllocationRequests{
+ Allocations: []verifregst.AllocationRequest{allocationRequest},
+ }
+
+ receiverParams, err := actors.SerializeParams(&allocationRequests)
+ require.NoError(t, err)
+
+ transferParams, err := actors.SerializeParams(&datacap2.TransferParams{
+ To: builtin.VerifiedRegistryActorAddr,
+ Amount: big.Mul(initialDatacap, builtin.TokenPrecision),
+ OperatorData: receiverParams,
+ })
+ require.NoError(t, err)
+
+ msg = &types.Message{
+ To: builtin.DatacapActorAddr,
+ From: verifiedClientAddr,
+ Method: datacap.Methods.TransferExported,
+ Params: transferParams,
+ Value: big.Zero(),
+ }
+
+ sm, err = api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err)
+
+ res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ // check datacap balance
+ dcap, err = api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Nil(t, dcap)
+
+ allocations, err := api.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(allocations))
+}
+
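+// makeVerifier adds addr as a verifier via the root key and waits until the granted allowance is visible in state.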
+func makeVerifier(ctx context.Context, t *testing.T, api *impl.FullNodeAPI, rootAddr address.Address, addr address.Address) {
+ allowance := big.NewInt(100000000000)
+ params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
+ require.NoError(t, aerr)
+
+ msg := &types.Message{
+ From: rootAddr,
+ To: verifreg.Address,
+ Method: verifreg.Methods.AddVerifier,
+ Params: params,
+ Value: big.Zero(),
+ }
+
+ sm, err := api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err, "AddVerifier failed")
+
+ res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Equal(t, allowance, *verifierAllowance)
+}
diff --git a/itests/worker_test.go b/itests/worker_test.go
index 246c842c5..c4f885fb0 100644
--- a/itests/worker_test.go
+++ b/itests/worker_test.go
@@ -730,3 +730,82 @@ waitForProof:
require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
}
+
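+// TestWorkerPledgeExpireCommit caps commit gas fees so low that the aggregate commit can never land, and expects the sector to end up Removed once the deal start epoch passes.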
+func TestWorkerPledgeExpireCommit(t *testing.T) {
+ kit.QuietMiningLogs()
+ _ = logging.SetLogLevel("sectors", "debug")
+
+ var tasksNoC2 = kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTDataCid, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2,
+ sealtasks.TTUnseal, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed})
+
+ fc := config.DefaultStorageMiner().Fees
+ fc.MaxCommitGasFee = types.FIL(abi.NewTokenAmount(10000)) // 10000 attofil, way too low for anything to land
+
+ ctx := context.Background()
+ client, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
+ kit.MutateSealingConfig(func(sc *config.SealingConfig) {
+ sc.AggregateCommits = true
+ }),
+ kit.ConstructorOpts(
+ node.Override(new(*sealing.Sealing), modules.SealingPipeline(fc)),
+ ),
+ kit.SplitstoreDisable(), // disable splitstore because messages which take a long time may get dropped
+ tasksNoC2) // no mock proofs
+
+ ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)
+
+ e, err := worker.Enabled(ctx)
+ require.NoError(t, err)
+ require.True(t, e)
+
+ dh := kit.NewDealHarness(t, client, miner, miner)
+
+ startEpoch := abi.ChainEpoch(4 << 10)
+
+ dh.StartRandomDeal(ctx, kit.MakeFullDealParams{
+ Rseed: 7,
+ StartEpoch: startEpoch,
+ })
+
+ var sn abi.SectorNumber
+
+ require.Eventually(t, func() bool {
+ s, err := miner.SectorsListNonGenesis(ctx)
+ require.NoError(t, err)
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 1 {
+ t.Fatalf("expected 1 sector, got %d", len(s))
+ }
+ sn = s[0]
+ return true
+ }, 30*time.Second, 1*time.Second)
+
+ t.Log("sector", sn)
+
+ t.Log("sector committing")
+
+ // wait until after startEpoch
+ client.WaitTillChain(ctx, kit.HeightAtLeast(startEpoch+20))
+
+ t.Log("after start")
+
+ sstate, err := miner.SectorsStatus(ctx, sn, false)
+ require.NoError(t, err)
+ require.Equal(t, api.SectorState(sealing.SubmitCommitAggregate), sstate.State)
+
+ _, err = miner.SectorCommitFlush(ctx)
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ sstate, err := miner.SectorsStatus(ctx, sn, false)
+ require.NoError(t, err)
+
+ t.Logf("sector state: %s", sstate.State)
+
+ return sstate.State == api.SectorState(sealing.Removed)
+ }, 30*time.Second, 1*time.Second)
+
+ t.Log("sector removed")
+}
diff --git a/markets/dagstore/wrapper.go b/markets/dagstore/wrapper.go
index b5813dc5e..a929ad1fc 100644
--- a/markets/dagstore/wrapper.go
+++ b/markets/dagstore/wrapper.go
@@ -48,7 +48,6 @@ type Wrapper struct {
dagst dagstore.Interface
minerAPI MinerAPI
failureCh chan dagstore.ShardResult
- traceCh chan dagstore.Trace
gcInterval time.Duration
}
@@ -64,9 +63,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
// The dagstore will write Shard failures to the `failureCh` here.
failureCh := make(chan dagstore.ShardResult, 1)
- // The dagstore will write Trace events to the `traceCh` here.
- traceCh := make(chan dagstore.Trace, 32)
-
var (
transientsDir = filepath.Join(cfg.RootDir, "transients")
datastoreDir = filepath.Join(cfg.RootDir, "datastore")
@@ -90,7 +86,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
Datastore: dstore,
MountRegistry: registry,
FailureCh: failureCh,
- TraceCh: traceCh,
TopLevelIndex: topIndex,
// not limiting fetches globally, as the Lotus mount does
// conditional throttling.
@@ -109,7 +104,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
dagst: dagst,
minerAPI: minerApi,
failureCh: failureCh,
- traceCh: traceCh,
gcInterval: time.Duration(cfg.GCInterval),
}
@@ -146,10 +140,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
w.backgroundWg.Add(1)
go w.gcLoop()
- // run a go-routine to read the trace for debugging.
- w.backgroundWg.Add(1)
- go w.traceLoop()
-
// Run a go-routine for shard recovery
if dss, ok := w.dagst.(*dagstore.DAGStore); ok {
w.backgroundWg.Add(1)
@@ -159,24 +149,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
return w.dagst.Start(ctx)
}
-func (w *Wrapper) traceLoop() {
- defer w.backgroundWg.Done()
-
- for w.ctx.Err() == nil {
- select {
- // Log trace events from the DAG store
- case tr := <-w.traceCh:
- log.Debugw("trace",
- "shard-key", tr.Key.String(),
- "op-type", tr.Op.String(),
- "after", tr.After.String())
-
- case <-w.ctx.Done():
- return
- }
- }
-}
-
func (w *Wrapper) gcLoop() {
defer w.backgroundWg.Done()
diff --git a/metrics/metrics.go b/metrics/metrics.go
index ee7bd8695..822974f81 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -116,6 +116,7 @@ var (
PubsubDeliverMessage = stats.Int64("pubsub/delivered", "Counter for total delivered messages", stats.UnitDimensionless)
PubsubRejectMessage = stats.Int64("pubsub/rejected", "Counter for total rejected messages", stats.UnitDimensionless)
PubsubDuplicateMessage = stats.Int64("pubsub/duplicate", "Counter for total duplicate messages", stats.UnitDimensionless)
+ PubsubPruneMessage = stats.Int64("pubsub/prune", "Counter for total prune messages", stats.UnitDimensionless)
PubsubRecvRPC = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless)
PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless)
PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless)
@@ -326,6 +327,10 @@ var (
Measure: PubsubDuplicateMessage,
Aggregation: view.Count(),
}
+ PubsubPruneMessageView = &view.View{
+ Measure: PubsubPruneMessage,
+ Aggregation: view.Count(),
+ }
PubsubRecvRPCView = &view.View{
Measure: PubsubRecvRPC,
Aggregation: view.Count(),
@@ -760,6 +765,7 @@ var ChainNodeViews = append([]*view.View{
PubsubDeliverMessageView,
PubsubRejectMessageView,
PubsubDuplicateMessageView,
+ PubsubPruneMessageView,
PubsubRecvRPCView,
PubsubSendRPCView,
PubsubDropRPCView,
diff --git a/miner/miner.go b/miner/miner.go
index 63862ba0f..1272e396f 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/hashicorp/golang-lru/arc/v2"
+ "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -373,8 +374,9 @@ minerLoop:
// MiningBase is the tipset on top of which we plan to construct our next block.
// Refer to godocs on GetBestMiningCandidate.
type MiningBase struct {
- TipSet *types.TipSet
- NullRounds abi.ChainEpoch
+ TipSet *types.TipSet
+ ComputeTime time.Time
+ NullRounds abi.ChainEpoch
}
// GetBestMiningCandidate implements the fork choice rule from a miner's
@@ -412,7 +414,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error)
}
}
- m.lastWork = &MiningBase{TipSet: bts}
+ m.lastWork = &MiningBase{TipSet: bts, ComputeTime: time.Now()}
return m.lastWork, nil
}
@@ -507,13 +509,13 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
rbase = bvals[len(bvals)-1]
}
- ticket, err := m.computeTicket(ctx, &rbase, base, mbi)
+ ticket, err := m.computeTicket(ctx, &rbase, round, base.TipSet.MinTicket(), mbi)
if err != nil {
err = xerrors.Errorf("scratching ticket failed: %w", err)
return nil, err
}
- winner, err = gen.IsRoundWinner(ctx, base.TipSet, round, m.address, rbase, mbi, m.api)
+ winner, err = gen.IsRoundWinner(ctx, round, m.address, rbase, mbi, m.api)
if err != nil {
err = xerrors.Errorf("failed to check if we win next round: %w", err)
return nil, err
@@ -554,12 +556,71 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
tProof := build.Clock.Now()
// get pending messages early,
- msgs, err := m.api.MpoolSelect(context.TODO(), base.TipSet.Key(), ticket.Quality())
+ msgs, err := m.api.MpoolSelect(ctx, base.TipSet.Key(), ticket.Quality())
if err != nil {
err = xerrors.Errorf("failed to select messages for block: %w", err)
return nil, err
}
+ tEquivocateWait := build.Clock.Now()
+
+ // This next block exists to "catch" equivocating miners,
+ // who submit 2 blocks at the same height at different times in order to split the network.
+ // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated,
+ // then re-calculate it.
+ // If the daemon detected equivocated blocks, those blocks will no longer be in the new base.
+ m.niceSleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second)))
+ newBase, err := m.GetBestMiningCandidate(ctx)
+ if err != nil {
+ err = xerrors.Errorf("failed to refresh best mining candidate: %w", err)
+ return nil, err
+ }
+
+ // If the base has changed, we take the _intersection_ of our old base and new base,
+ // thus ejecting blocks from any equivocating miners, without taking any new blocks.
+ if newBase.TipSet.Height() == base.TipSet.Height() && !newBase.TipSet.Equals(base.TipSet) {
+ log.Warnf("base changed from %s to %s, taking intersection", base.TipSet.Key(), newBase.TipSet.Key())
+ newBaseMap := map[cid.Cid]struct{}{}
+ for _, newBaseBlk := range newBase.TipSet.Cids() {
+ newBaseMap[newBaseBlk] = struct{}{}
+ }
+
+ refreshedBaseBlocks := make([]*types.BlockHeader, 0, len(base.TipSet.Cids()))
+ for _, baseBlk := range base.TipSet.Blocks() {
+ if _, ok := newBaseMap[baseBlk.Cid()]; ok {
+ refreshedBaseBlocks = append(refreshedBaseBlocks, baseBlk)
+ }
+ }
+
+ if len(refreshedBaseBlocks) != 0 && len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) {
+ refreshedBase, err := types.NewTipSet(refreshedBaseBlocks)
+ if err != nil {
+ err = xerrors.Errorf("failed to create new tipset when refreshing: %w", err)
+ return nil, err
+ }
+
+ if !base.TipSet.MinTicket().Equals(refreshedBase.MinTicket()) {
+ log.Warn("recomputing ticket due to base refresh")
+
+ ticket, err = m.computeTicket(ctx, &rbase, round, refreshedBase.MinTicket(), mbi)
+ if err != nil {
+ err = xerrors.Errorf("failed to refresh ticket: %w", err)
+ return nil, err
+ }
+ }
+
+ log.Warn("re-selecting messages due to base refresh")
+ // refresh messages, as the selected messages may no longer be valid
+ msgs, err = m.api.MpoolSelect(ctx, refreshedBase.Key(), ticket.Quality())
+ if err != nil {
+ err = xerrors.Errorf("failed to re-select messages for block: %w", err)
+ return nil, err
+ }
+
+ base.TipSet = refreshedBase
+ }
+ }
+
tPending := build.Clock.Now()
// TODO: winning post proof
@@ -582,22 +643,22 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
"tTicket ", tTicket.Sub(tPowercheck),
"tSeed ", tSeed.Sub(tTicket),
"tProof ", tProof.Sub(tSeed),
- "tPending ", tPending.Sub(tProof),
+ "tEquivocateWait ", tEquivocateWait.Sub(tProof),
+ "tPending ", tPending.Sub(tEquivocateWait),
"tCreateBlock ", tCreateBlock.Sub(tPending))
}
return minedBlock, nil
}
-func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
+func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, round abi.ChainEpoch, chainRand *types.Ticket, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
buf := new(bytes.Buffer)
if err := m.address.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
}
- round := base.TipSet.Height() + base.NullRounds + 1
if round > build.UpgradeSmokeHeight {
- buf.Write(base.TipSet.MinTicket().VRFProof)
+ buf.Write(chainRand.VRFProof)
}
input, err := lrand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
diff --git a/node/impl/client/client.go b/node/impl/client/client.go
index 73ffeabe3..c7bb252a1 100644
--- a/node/impl/client/client.go
+++ b/node/impl/client/client.go
@@ -527,7 +527,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor
return nil, xerrors.Errorf("failed to read CAR header: %w", err)
}
if len(hd.Roots) != 1 {
- return nil, xerrors.New("car file can have one and only one header")
+ return nil, xerrors.New("car file can have one and only one root")
}
if hd.Version != 1 && hd.Version != 2 {
return nil, xerrors.Errorf("car version must be 1 or 2, is %d", hd.Version)
diff --git a/node/impl/full/dummy.go b/node/impl/full/dummy.go
index c4bda6428..743eadf34 100644
--- a/node/impl/full/dummy.go
+++ b/node/impl/full/dummy.go
@@ -178,5 +178,13 @@ func (e *EthModuleDummy) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubs
return false, ErrModuleDisabled
}
+func (e *EthModuleDummy) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ return nil, ErrModuleDisabled
+}
+
+func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return nil, ErrModuleDisabled
+}
+
var _ EthModuleAPI = &EthModuleDummy{}
var _ EthEventAPI = &EthModuleDummy{}
diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go
index f4d9d8371..2a7293956 100644
--- a/node/impl/full/eth.go
+++ b/node/impl/full/eth.go
@@ -3,20 +3,16 @@ package full
import (
"bytes"
"context"
- "encoding/json"
"errors"
"fmt"
"os"
"sort"
"strconv"
"strings"
- "sync"
"time"
- "github.com/google/uuid"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
- "github.com/zyedidia/generic/queue"
"go.uber.org/fx"
"golang.org/x/xerrors"
@@ -24,10 +20,9 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/builtin"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- "github.com/filecoin-project/go-state-types/builtin/v10/eam"
"github.com/filecoin-project/go-state-types/builtin/v10/evm"
- "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
@@ -42,7 +37,6 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
- "github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -77,6 +71,8 @@ type EthModuleAPI interface {
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
type EthEventAPI interface {
@@ -241,101 +237,8 @@ func (a *EthModule) EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthH
return newEthBlockFromFilecoinTipSet(ctx, ts, fullTxInfo, a.Chain, a.StateAPI)
}
-func (a *EthModule) getTipsetByEthBlockNumberOrHash(ctx context.Context, blkParam ethtypes.EthBlockNumberOrHash) (*types.TipSet, error) {
- head := a.Chain.GetHeaviestTipSet()
-
- predefined := blkParam.PredefinedBlock
- if predefined != nil {
- if *predefined == "earliest" {
- return nil, fmt.Errorf("block param \"earliest\" is not supported")
- } else if *predefined == "pending" {
- return head, nil
- } else if *predefined == "latest" {
- parent, err := a.Chain.GetTipSetFromKey(ctx, head.Parents())
- if err != nil {
- return nil, fmt.Errorf("cannot get parent tipset")
- }
- return parent, nil
- } else {
- return nil, fmt.Errorf("unknown predefined block %s", *predefined)
- }
- }
-
- if blkParam.BlockNumber != nil {
- height := abi.ChainEpoch(*blkParam.BlockNumber)
- if height > head.Height()-1 {
- return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
- }
- ts, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, height, head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", height)
- }
- return ts, nil
- }
-
- if blkParam.BlockHash != nil {
- ts, err := a.Chain.GetTipSetByCid(ctx, blkParam.BlockHash.ToCid())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset by hash: %v", err)
- }
-
- // verify that the tipset is in the canonical chain
- if blkParam.RequireCanonical {
- // walk up the current chain (our head) until we reach ts.Height()
- walkTs, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, ts.Height(), head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", ts.Height())
- }
-
- // verify that it equals the expected tipset
- if !walkTs.Equals(ts) {
- return nil, fmt.Errorf("tipset is not canonical")
- }
- }
-
- return ts, nil
- }
-
- return nil, errors.New("invalid block param")
-}
-
-func (a *EthModule) parseBlkParam(ctx context.Context, blkParam string, strict bool) (*types.TipSet, error) {
- if blkParam == "earliest" {
- return nil, fmt.Errorf("block param \"earliest\" is not supported")
- }
-
- head := a.Chain.GetHeaviestTipSet()
- switch blkParam {
- case "pending":
- return head, nil
- case "latest":
- parent, err := a.Chain.GetTipSetFromKey(ctx, head.Parents())
- if err != nil {
- return nil, fmt.Errorf("cannot get parent tipset")
- }
- return parent, nil
- default:
- var num ethtypes.EthUint64
- err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
- if err != nil {
- return nil, fmt.Errorf("cannot parse block number: %v", err)
- }
- if abi.ChainEpoch(num) > head.Height()-1 {
- return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
- }
- ts, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(num), head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", num)
- }
- if strict && ts.Height() != abi.ChainEpoch(num) {
- return nil, ErrNullRound
- }
- return ts, nil
- }
-}
-
func (a *EthModule) EthGetBlockByNumber(ctx context.Context, blkParam string, fullTxInfo bool) (ethtypes.EthBlock, error) {
- ts, err := a.parseBlkParam(ctx, blkParam, true)
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkParam, true)
if err != nil {
return ethtypes.EthBlock{}, err
}
@@ -431,7 +334,7 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas
}
func (a *EthModule) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) {
- hash, err := EthTxHashFromMessageCid(ctx, cid, a.StateAPI)
+ hash, err := ethTxHashFromMessageCid(ctx, cid, a.StateAPI)
if hash == ethtypes.EmptyEthHash {
// not found
return nil, nil
@@ -446,7 +349,7 @@ func (a *EthModule) EthGetTransactionCount(ctx context.Context, sender ethtypes.
return ethtypes.EthUint64(0), nil
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return ethtypes.EthUint64(0), xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -534,7 +437,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -613,7 +516,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
}
func (a *EthModule) EthGetStorageAt(ctx context.Context, ethAddr ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -709,7 +612,7 @@ func (a *EthModule) EthGetBalance(ctx context.Context, address ethtypes.EthAddre
return ethtypes.EthBigInt{}, err
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return ethtypes.EthBigInt{}, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -790,7 +693,7 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
}
}
- ts, err := a.parseBlkParam(ctx, params.NewestBlkNum, false)
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, params.NewestBlkNum, false)
if err != nil {
return ethtypes.EthFeeHistory{}, fmt.Errorf("bad block parameter %s: %s", params.NewestBlkNum, err)
}
@@ -923,62 +826,145 @@ func (a *EthModule) Web3ClientVersion(ctx context.Context) (string, error) {
return build.UserVersion(), nil
}
-func (a *EthModule) ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types.Message, error) {
- var from address.Address
- if tx.From == nil || *tx.From == (ethtypes.EthAddress{}) {
- // Send from the filecoin "system" address.
- var err error
- from, err = (ethtypes.EthAddress{}).ToFilecoinAddress()
- if err != nil {
- return nil, fmt.Errorf("failed to construct the ethereum system address: %w", err)
- }
- } else {
- // The from address must be translatable to an f4 address.
- var err error
- from, err = tx.From.ToFilecoinAddress()
- if err != nil {
- return nil, fmt.Errorf("failed to translate sender address (%s): %w", tx.From.String(), err)
- }
- if p := from.Protocol(); p != address.Delegated {
- return nil, fmt.Errorf("expected a class 4 address, got: %d: %w", p, err)
- }
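+// EthTraceBlock returns an Ethereum-style execution trace for every non-system message executed in the given block.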
+func (a *EthModule) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkNum, false)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset: %w", err)
}
- var params []byte
- if len(tx.Data) > 0 {
- initcode := abi.CborBytes(tx.Data)
- params2, err := actors.SerializeParams(&initcode)
- if err != nil {
- return nil, fmt.Errorf("failed to serialize params: %w", err)
- }
- params = params2
+ _, trace, err := a.StateManager.ExecutionTrace(ctx, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("failed when calling ExecutionTrace: %w", err)
}
- var to address.Address
- var method abi.MethodNum
- if tx.To == nil {
- // this is a contract creation
- to = builtintypes.EthereumAddressManagerActorAddr
- method = builtintypes.MethodsEAM.CreateExternal
- } else {
- addr, err := tx.To.ToFilecoinAddress()
- if err != nil {
- return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
- }
- to = addr
- method = builtintypes.MethodsEVM.InvokeContract
+ tsParent, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, ts.Height()+1, a.Chain.GetHeaviestTipSet().Key())
+ if err != nil {
+ return nil, xerrors.Errorf("cannot get tipset at height: %v", ts.Height()+1)
}
- return &types.Message{
- From: from,
- To: to,
- Value: big.Int(tx.Value),
- Method: method,
- Params: params,
- GasLimit: build.BlockGasLimit,
- GasFeeCap: big.Zero(),
- GasPremium: big.Zero(),
- }, nil
+ msgs, err := a.ChainGetParentMessages(ctx, tsParent.Blocks()[0].Cid())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get parent messages: %w", err)
+ }
+
+ cid, err := ts.Key().Cid()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset key cid: %w", err)
+ }
+
+ blkHash, err := ethtypes.EthHashFromCid(cid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to parse eth hash from cid: %w", err)
+ }
+
+ allTraces := make([]*ethtypes.EthTraceBlock, 0, len(trace))
+ msgIdx := 0
+ for _, ir := range trace {
+ // ignore messages from system actor
+ if ir.Msg.From == builtinactors.SystemActorAddr {
+ continue
+ }
+
+ // as we include TransactionPosition in the results, let's sanity-check that the
+ // traces are indeed in the message execution order
+ if ir.Msg.Cid() != msgs[msgIdx].Message.Cid() {
+ return nil, xerrors.Errorf("traces are not in message execution order")
+ }
+ msgIdx++
+
+ txHash, err := a.EthGetTransactionHashByCid(ctx, ir.MsgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err)
+ }
+ if txHash == nil {
+ log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid)
+ continue
+ }
+
+ traces := []*ethtypes.EthTrace{}
+ err = buildTraces(ctx, &traces, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), a.StateAPI)
+ if err != nil {
+ return nil, xerrors.Errorf("failed building traces: %w", err)
+ }
+
+ traceBlocks := make([]*ethtypes.EthTraceBlock, 0, len(traces))
+ for _, trace := range traces {
+ traceBlocks = append(traceBlocks, &ethtypes.EthTraceBlock{
+ EthTrace: trace,
+ BlockHash: blkHash,
+ BlockNumber: int64(ts.Height()),
+ TransactionHash: *txHash,
+ TransactionPosition: msgIdx,
+ })
+ }
+
+ allTraces = append(allTraces, traceBlocks...)
+ }
+
+ return allTraces, nil
+}
+
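+// EthTraceReplayBlockTransactions replays all transactions in the block and returns their traces; only the "trace" trace type is supported.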
+func (a *EthModule) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if len(traceTypes) != 1 || traceTypes[0] != "trace" {
+ return nil, fmt.Errorf("only 'trace' is supported")
+ }
+
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkNum, false)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset: %w", err)
+ }
+
+ _, trace, err := a.StateManager.ExecutionTrace(ctx, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("failed when calling ExecutionTrace: %w", err)
+ }
+
+ allTraces := make([]*ethtypes.EthTraceReplayBlockTransaction, 0, len(trace))
+ for _, ir := range trace {
+ // ignore messages from system actor
+ if ir.Msg.From == builtinactors.SystemActorAddr {
+ continue
+ }
+
+ txHash, err := a.EthGetTransactionHashByCid(ctx, ir.MsgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err)
+ }
+ if txHash == nil {
+ log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid)
+ continue
+ }
+
+ var output ethtypes.EthBytes
+ invokeCreateOnEAM := ir.Msg.To == builtin.EthereumAddressManagerActorAddr && (ir.Msg.Method == builtin.MethodsEAM.Create || ir.Msg.Method == builtin.MethodsEAM.Create2)
+ if ir.Msg.Method == builtin.MethodsEVM.InvokeContract || invokeCreateOnEAM {
+ output, err = decodePayload(ir.ExecutionTrace.MsgRct.Return, ir.ExecutionTrace.MsgRct.ReturnCodec)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decode payload: %w", err)
+ }
+ } else {
+ output, err = handleFilecoinMethodOutput(ir.ExecutionTrace.MsgRct.ExitCode, ir.ExecutionTrace.MsgRct.ReturnCodec, ir.ExecutionTrace.MsgRct.Return)
+ if err != nil {
+ return nil, xerrors.Errorf("could not convert output: %w", err)
+ }
+ }
+
+ t := ethtypes.EthTraceReplayBlockTransaction{
+ Output: output,
+ TransactionHash: *txHash,
+ StateDiff: nil,
+ VmTrace: nil,
+ }
+
+ err = buildTraces(ctx, &t.Trace, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), a.StateAPI)
+ if err != nil {
+ return nil, xerrors.Errorf("failed building traces: %w", err)
+ }
+
+ allTraces = append(allTraces, &t)
+ }
+
+ return allTraces, nil
}
func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) {
@@ -1014,7 +1000,7 @@ func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk ty
}
func (a *EthModule) EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) {
- msg, err := a.ethCallToFilecoinMessage(ctx, tx)
+ msg, err := ethCallToFilecoinMessage(ctx, tx)
if err != nil {
return ethtypes.EthUint64(0), err
}
@@ -1172,12 +1158,12 @@ func ethGasSearch(
}
func (a *EthModule) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
- msg, err := a.ethCallToFilecoinMessage(ctx, tx)
+ msg, err := ethCallToFilecoinMessage(ctx, tx)
if err != nil {
return nil, xerrors.Errorf("failed to convert ethcall to filecoin message: %w", err)
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -1578,983 +1564,6 @@ func (e *EthEvent) GC(ctx context.Context, ttl time.Duration) {
}
}
-type filterEventCollector interface {
- TakeCollectedEvents(context.Context) []*filter.CollectedEvent
-}
-
-type filterMessageCollector interface {
- TakeCollectedMessages(context.Context) []*types.SignedMessage
-}
-
-type filterTipSetCollector interface {
- TakeCollectedTipSets(context.Context) []types.TipSetKey
-}
-
-func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
- var (
- topicsFound [4]bool
- topicsFoundCount int
- dataFound bool
- )
- // Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max).
- topics = make([]ethtypes.EthHash, 0, 4)
- for _, entry := range entries {
- // Drop events with non-raw topics to avoid mistakes.
- if entry.Codec != cid.Raw {
- log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key)
- return nil, nil, false
- }
- // Check if the key is t1..t4
- if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
- // '1' - '1' == 0, etc.
- idx := int(entry.Key[1] - '1')
-
- // Drop events with mis-sized topics.
- if len(entry.Value) != 32 {
- log.Warnw("got an EVM event topic with an invalid size", "key", entry.Key, "size", len(entry.Value))
- return nil, nil, false
- }
-
- // Drop events with duplicate topics.
- if topicsFound[idx] {
- log.Warnw("got a duplicate EVM event topic", "key", entry.Key)
- return nil, nil, false
- }
- topicsFound[idx] = true
- topicsFoundCount++
-
- // Extend the topics array
- for len(topics) <= idx {
- topics = append(topics, ethtypes.EthHash{})
- }
- copy(topics[idx][:], entry.Value)
- } else if entry.Key == "d" {
- // Drop events with duplicate data fields.
- if dataFound {
- log.Warnw("got duplicate EVM event data")
- return nil, nil, false
- }
-
- dataFound = true
- data = entry.Value
- } else {
- // Skip entries we don't understand (makes it easier to extend things).
- // But we warn for now because we don't expect them.
- log.Warnw("unexpected event entry", "key", entry.Key)
- }
-
- }
-
- // Drop events with skipped topics.
- if len(topics) != topicsFoundCount {
- log.Warnw("EVM event topic length mismatch", "expected", len(topics), "actual", topicsFoundCount)
- return nil, nil, false
- }
- return data, topics, true
-}
-
-func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
- for _, ev := range evs {
- log := ethtypes.EthLog{
- Removed: ev.Reverted,
- LogIndex: ethtypes.EthUint64(ev.EventIdx),
- TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
- BlockNumber: ethtypes.EthUint64(ev.Height),
- }
- var (
- err error
- ok bool
- )
-
- log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
- if !ok {
- continue
- }
-
- log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
- if err != nil {
- return nil, err
- }
-
- log.TransactionHash, err = EthTxHashFromMessageCid(context.TODO(), ev.MsgCid, sa)
- if err != nil {
- return nil, err
- }
- c, err := ev.TipSetKey.Cid()
- if err != nil {
- return nil, err
- }
- log.BlockHash, err = ethtypes.EthHashFromCid(c)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, log)
- }
-
- return res, nil
-}
-
-func ethFilterResultFromTipSets(tsks []types.TipSetKey) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
-
- for _, tsk := range tsks {
- c, err := tsk.Cid()
- if err != nil {
- return nil, err
- }
- hash, err := ethtypes.EthHashFromCid(c)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, hash)
- }
-
- return res, nil
-}
-
-func ethFilterResultFromMessages(cs []*types.SignedMessage, sa StateAPI) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
-
- for _, c := range cs {
- hash, err := EthTxHashFromSignedMessage(context.TODO(), c, sa)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, hash)
- }
-
- return res, nil
-}
-
-type EthSubscriptionManager struct {
- Chain *store.ChainStore
- StateAPI StateAPI
- ChainAPI ChainAPI
- mu sync.Mutex
- subs map[ethtypes.EthSubscriptionID]*ethSubscription
-}
-
-func (e *EthSubscriptionManager) StartSubscription(ctx context.Context, out ethSubscriptionCallback, dropFilter func(context.Context, filter.Filter) error) (*ethSubscription, error) { // nolint
- rawid, err := uuid.NewRandom()
- if err != nil {
- return nil, xerrors.Errorf("new uuid: %w", err)
- }
- id := ethtypes.EthSubscriptionID{}
- copy(id[:], rawid[:]) // uuid is 16 bytes
-
- ctx, quit := context.WithCancel(ctx)
-
- sub := &ethSubscription{
- Chain: e.Chain,
- StateAPI: e.StateAPI,
- ChainAPI: e.ChainAPI,
- uninstallFilter: dropFilter,
- id: id,
- in: make(chan interface{}, 200),
- out: out,
- quit: quit,
-
- toSend: queue.New[[]byte](),
- sendCond: make(chan struct{}, 1),
- }
-
- e.mu.Lock()
- if e.subs == nil {
- e.subs = make(map[ethtypes.EthSubscriptionID]*ethSubscription)
- }
- e.subs[sub.id] = sub
- e.mu.Unlock()
-
- go sub.start(ctx)
- go sub.startOut(ctx)
-
- return sub, nil
-}
-
-func (e *EthSubscriptionManager) StopSubscription(ctx context.Context, id ethtypes.EthSubscriptionID) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- sub, ok := e.subs[id]
- if !ok {
- return xerrors.Errorf("subscription not found")
- }
- sub.stop()
- delete(e.subs, id)
-
- return nil
-}
-
-type ethSubscriptionCallback func(context.Context, jsonrpc.RawParams) error
-
-const maxSendQueue = 20000
-
-type ethSubscription struct {
- Chain *store.ChainStore
- StateAPI StateAPI
- ChainAPI ChainAPI
- uninstallFilter func(context.Context, filter.Filter) error
- id ethtypes.EthSubscriptionID
- in chan interface{}
- out ethSubscriptionCallback
-
- mu sync.Mutex
- filters []filter.Filter
- quit func()
-
- sendLk sync.Mutex
- sendQueueLen int
- toSend *queue.Queue[[]byte]
- sendCond chan struct{}
-}
-
-func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- f.SetSubChannel(e.in)
- e.filters = append(e.filters, f)
-}
-
- // startOut processes the final subscription queue. It's here in case the subscriber
-// is slow, and we need to buffer the messages.
-func (e *ethSubscription) startOut(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case <-e.sendCond:
- e.sendLk.Lock()
-
- for !e.toSend.Empty() {
- front := e.toSend.Dequeue()
- e.sendQueueLen--
-
- e.sendLk.Unlock()
-
- if err := e.out(ctx, front); err != nil {
- log.Warnw("error sending subscription response, killing subscription", "sub", e.id, "error", err)
- e.stop()
- return
- }
-
- e.sendLk.Lock()
- }
-
- e.sendLk.Unlock()
- }
- }
-}
-
-func (e *ethSubscription) send(ctx context.Context, v interface{}) {
- resp := ethtypes.EthSubscriptionResponse{
- SubscriptionID: e.id,
- Result: v,
- }
-
- outParam, err := json.Marshal(resp)
- if err != nil {
- log.Warnw("marshaling subscription response", "sub", e.id, "error", err)
- return
- }
-
- e.sendLk.Lock()
- defer e.sendLk.Unlock()
-
- e.toSend.Enqueue(outParam)
-
- e.sendQueueLen++
- if e.sendQueueLen > maxSendQueue {
- log.Warnw("subscription send queue full, killing subscription", "sub", e.id)
- e.stop()
- return
- }
-
- select {
- case e.sendCond <- struct{}{}:
- default: // already signalled, and we're holding the lock so we know that the event will be processed
- }
-}
-
-func (e *ethSubscription) start(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case v := <-e.in:
- switch vt := v.(type) {
- case *filter.CollectedEvent:
- evs, err := ethFilterResultFromEvents([]*filter.CollectedEvent{vt}, e.StateAPI)
- if err != nil {
- continue
- }
-
- for _, r := range evs.Results {
- e.send(ctx, r)
- }
- case *types.TipSet:
- ev, err := newEthBlockFromFilecoinTipSet(ctx, vt, true, e.Chain, e.StateAPI)
- if err != nil {
- break
- }
-
- e.send(ctx, ev)
- case *types.SignedMessage: // mpool txid
- evs, err := ethFilterResultFromMessages([]*types.SignedMessage{vt}, e.StateAPI)
- if err != nil {
- continue
- }
-
- for _, r := range evs.Results {
- e.send(ctx, r)
- }
- default:
- log.Warnf("unexpected subscription value type: %T", vt)
- }
- }
- }
-}
-
-func (e *ethSubscription) stop() {
- e.mu.Lock()
- if e.quit == nil {
- e.mu.Unlock()
- return
- }
-
- if e.quit != nil {
- e.quit()
- e.quit = nil
- e.mu.Unlock()
-
- for _, f := range e.filters {
- // note: the context is actually unused in uninstallFilter
- if err := e.uninstallFilter(context.TODO(), f); err != nil {
- // this will leave the filter a zombie, collecting events up to the maximum allowed
- log.Warnf("failed to remove filter when unsubscribing: %v", err)
- }
- }
- }
-}
-
-func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTxInfo bool, cs *store.ChainStore, sa StateAPI) (ethtypes.EthBlock, error) {
- parentKeyCid, err := ts.Parents().Cid()
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
- parentBlkHash, err := ethtypes.EthHashFromCid(parentKeyCid)
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
-
- bn := ethtypes.EthUint64(ts.Height())
-
- blkCid, err := ts.Key().Cid()
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
- blkHash, err := ethtypes.EthHashFromCid(blkCid)
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
-
- msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
- if err != nil {
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
- }
-
- block := ethtypes.NewEthBlock(len(msgs) > 0)
-
- gasUsed := int64(0)
- for i, msg := range msgs {
- rcpt := rcpts[i]
- ti := ethtypes.EthUint64(i)
- gasUsed += rcpt.GasUsed
- var smsg *types.SignedMessage
- switch msg := msg.(type) {
- case *types.SignedMessage:
- smsg = msg
- case *types.Message:
- smsg = &types.SignedMessage{
- Message: *msg,
- Signature: crypto.Signature{
- Type: crypto.SigTypeBLS,
- },
- }
- default:
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
- }
- tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
- }
-
- tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
- tx.BlockHash = &blkHash
- tx.BlockNumber = &bn
- tx.TransactionIndex = &ti
-
- if fullTxInfo {
- block.Transactions = append(block.Transactions, tx)
- } else {
- block.Transactions = append(block.Transactions, tx.Hash.String())
- }
- }
-
- block.Hash = blkHash
- block.Number = bn
- block.ParentHash = parentBlkHash
- block.Timestamp = ethtypes.EthUint64(ts.Blocks()[0].Timestamp)
- block.BaseFeePerGas = ethtypes.EthBigInt{Int: ts.Blocks()[0].ParentBaseFee.Int}
- block.GasUsed = ethtypes.EthUint64(gasUsed)
- return block, nil
-}
-
-func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
- msgs, err := cs.MessagesForTipset(ctx, ts)
- if err != nil {
- return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
- }
-
- _, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
- if err != nil {
- return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
- }
-
- rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
- if err != nil {
- return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
- }
-
- if len(msgs) != len(rcpts) {
- return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
- }
-
- return msgs, rcpts, nil
-}
-
-// lookupEthAddress makes its best effort at finding the Ethereum address for a
-// Filecoin address. It does the following:
-//
-// 1. If the supplied address is an f410 address, we return its payload as the EthAddress.
- // 2. Otherwise (f0, f1, f2, f3), we look up the actor on the state tree. If it has a delegated address, we return it if it's an f410 address.
-// 3. Otherwise, we fall back to returning a masked ID Ethereum address. If the supplied address is an f0 address, we
-// use that ID to form the masked ID address.
-// 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it.
-func lookupEthAddress(ctx context.Context, addr address.Address, sa StateAPI) (ethtypes.EthAddress, error) {
- // BLOCK A: We are trying to get an actual Ethereum address from an f410 address.
- // Attempt to convert directly, if it's an f4 address.
- ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr)
- if err == nil && !ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
-
- // Lookup on the target actor and try to get an f410 address.
- if actor, err := sa.StateGetActor(ctx, addr, types.EmptyTSK); err != nil {
- return ethtypes.EthAddress{}, err
- } else if actor.Address != nil {
- if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
- }
-
- // BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address.
- // Check if we already have an ID addr, and use it if possible.
- if err == nil && ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
-
- // Otherwise, resolve the ID addr.
- idAddr, err := sa.StateLookupID(ctx, addr, types.EmptyTSK)
- if err != nil {
- return ethtypes.EthAddress{}, err
- }
- return ethtypes.EthAddressFromFilecoinAddress(idAddr)
-}
-
-func EthTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethtypes.EthHash, error) {
- smsg, err := sa.Chain.GetSignedMessage(ctx, c)
- if err == nil {
- // This is an Eth tx, a Secp message, or a BLS message in the mpool
- return EthTxHashFromSignedMessage(ctx, smsg, sa)
- }
-
- _, err = sa.Chain.GetMessage(ctx, c)
- if err == nil {
- // This is a BLS message
- return ethtypes.EthHashFromCid(c)
- }
-
- return ethtypes.EmptyEthHash, nil
-}
-
-func EthTxHashFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthHash, error) {
- if smsg.Signature.Type == crypto.SigTypeDelegated {
- ethTx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EmptyEthHash, err
- }
- return ethTx.Hash, nil
- } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
- return ethtypes.EthHashFromCid(smsg.Cid())
- } else { // BLS message
- return ethtypes.EthHashFromCid(smsg.Message.Cid())
- }
-}
-
-func newEthTxFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthTx, error) {
- var tx ethtypes.EthTx
- var err error
-
- // This is an eth tx
- if smsg.Signature.Type == crypto.SigTypeDelegated {
- tx, err = ethtypes.EthTxFromSignedEthMessage(smsg)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err)
- }
-
- tx.Hash, err = tx.TxHash()
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err)
- }
-
- fromAddr, err := lookupEthAddress(ctx, smsg.Message.From, sa)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
- }
-
- tx.From = fromAddr
- } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message
- tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
- tx.Hash, err = ethtypes.EthHashFromCid(smsg.Cid())
- if err != nil {
- return tx, err
- }
- } else { // BLS Filecoin message
- tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
- tx.Hash, err = ethtypes.EthHashFromCid(smsg.Message.Cid())
- if err != nil {
- return tx, err
- }
- }
-
- return tx, nil
-}
-
-// ethTxFromNativeMessage does NOT populate:
-// - BlockHash
-// - BlockNumber
-// - TransactionIndex
-// - Hash
-func ethTxFromNativeMessage(ctx context.Context, msg *types.Message, sa StateAPI) ethtypes.EthTx {
- // We don't care if we error here, conversion is best effort for non-eth transactions
- from, _ := lookupEthAddress(ctx, msg.From, sa)
- to, _ := lookupEthAddress(ctx, msg.To, sa)
- return ethtypes.EthTx{
- To: &to,
- From: from,
- Nonce: ethtypes.EthUint64(msg.Nonce),
- ChainID: ethtypes.EthUint64(build.Eip155ChainId),
- Value: ethtypes.EthBigInt(msg.Value),
- Type: ethtypes.Eip1559TxType,
- Gas: ethtypes.EthUint64(msg.GasLimit),
- MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap),
- MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium),
- AccessList: []ethtypes.EthHash{},
- }
-}
-
- // newEthTxFromMessageLookup creates an Ethereum transaction from a Filecoin message lookup. If a negative txIdx is
- // passed, the function looks up the message's transaction index in the tipset; otherwise it uses the supplied txIdx.
-func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, txIdx int, cs *store.ChainStore, sa StateAPI) (ethtypes.EthTx, error) {
- ts, err := cs.LoadTipSet(ctx, msgLookup.TipSet)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- // This tx is located in the parent tipset
- parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- parentTsCid, err := parentTs.Key().Cid()
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- // lookup the transactionIndex
- if txIdx < 0 {
- msgs, err := cs.MessagesForTipset(ctx, parentTs)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
- for i, msg := range msgs {
- if msg.Cid() == msgLookup.Message {
- txIdx = i
- break
- }
- }
- if txIdx < 0 {
- return ethtypes.EthTx{}, fmt.Errorf("cannot find the msg in the tipset")
- }
- }
-
- blkHash, err := ethtypes.EthHashFromCid(parentTsCid)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- smsg, err := getSignedMessage(ctx, cs, msgLookup.Message)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to get signed msg: %w", err)
- }
-
- tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- var (
- bn = ethtypes.EthUint64(parentTs.Height())
- ti = ethtypes.EthUint64(txIdx)
- )
-
- tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
- tx.BlockHash = &blkHash
- tx.BlockNumber = &bn
- tx.TransactionIndex = &ti
- return tx, nil
-}
-
-func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, events []types.Event, cs *store.ChainStore, sa StateAPI) (api.EthTxReceipt, error) {
- var (
- transactionIndex ethtypes.EthUint64
- blockHash ethtypes.EthHash
- blockNumber ethtypes.EthUint64
- )
-
- if tx.TransactionIndex != nil {
- transactionIndex = *tx.TransactionIndex
- }
- if tx.BlockHash != nil {
- blockHash = *tx.BlockHash
- }
- if tx.BlockNumber != nil {
- blockNumber = *tx.BlockNumber
- }
-
- receipt := api.EthTxReceipt{
- TransactionHash: tx.Hash,
- From: tx.From,
- To: tx.To,
- TransactionIndex: transactionIndex,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- Type: ethtypes.EthUint64(2),
- Logs: []ethtypes.EthLog{}, // empty log array is compulsory when no logs, or libraries like ethers.js break
- LogsBloom: ethtypes.EmptyEthBloom[:],
- }
-
- if lookup.Receipt.ExitCode.IsSuccess() {
- receipt.Status = 1
- } else {
- receipt.Status = 0
- }
-
- receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)
-
- // TODO: handle CumulativeGasUsed
- receipt.CumulativeGasUsed = ethtypes.EmptyEthInt
-
- // TODO: avoid loading the tipset twice (once here, once when we convert the message to a txn)
- ts, err := cs.GetTipSetFromKey(ctx, lookup.TipSet)
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
- }
-
- // The tx is located in the parent tipset
- parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err)
- }
-
- baseFee := parentTs.Blocks()[0].ParentBaseFee
- gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
- totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)
-
- effectiveGasPrice := big.Zero()
- if lookup.Receipt.GasUsed > 0 {
- effectiveGasPrice = big.Div(totalSpent, big.NewInt(lookup.Receipt.GasUsed))
- }
- receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)
-
- if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
- // Create and Create2 return the same things.
- var ret eam.CreateExternalReturn
- if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
- }
- addr := ethtypes.EthAddress(ret.EthAddress)
- receipt.ContractAddress = &addr
- }
-
- if len(events) > 0 {
- receipt.Logs = make([]ethtypes.EthLog, 0, len(events))
- for i, evt := range events {
- l := ethtypes.EthLog{
- Removed: false,
- LogIndex: ethtypes.EthUint64(i),
- TransactionHash: tx.Hash,
- TransactionIndex: transactionIndex,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- }
-
- data, topics, ok := ethLogFromEvent(evt.Entries)
- if !ok {
- // not an eth event.
- continue
- }
- for _, topic := range topics {
- ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
- }
- l.Data = data
- l.Topics = topics
-
- addr, err := address.NewIDAddress(uint64(evt.Emitter))
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to create ID address: %w", err)
- }
-
- l.Address, err = lookupEthAddress(ctx, addr, sa)
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
- }
-
- ethtypes.EthBloomSet(receipt.LogsBloom, l.Address[:])
- receipt.Logs = append(receipt.Logs, l)
- }
- }
-
- return receipt, nil
-}
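The receipt's effective gas price above is derived from what was actually charged (base fee burn, miner tip, and over-estimation burn) divided by the gas used, rather than from the bid values. A minimal standalone sketch of that arithmetic, with made-up attoFIL figures:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical gas outputs, for illustration only.
	baseFeeBurn := big.NewInt(1_000_000)     // attoFIL burned at the base fee
	minerTip := big.NewInt(50_000)           // attoFIL paid to the miner
	overEstimationBurn := big.NewInt(10_000) // attoFIL burned for over-estimating
	gasUsed := big.NewInt(2_000)

	totalSpent := new(big.Int).Add(baseFeeBurn, minerTip)
	totalSpent.Add(totalSpent, overEstimationBurn)

	// effectiveGasPrice = totalSpent / gasUsed, zero when no gas was used
	effectiveGasPrice := new(big.Int)
	if gasUsed.Sign() > 0 {
		effectiveGasPrice.Quo(totalSpent, gasUsed)
	}
	fmt.Println(effectiveGasPrice) // 530
}
```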
-
-func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error {
- for _, blk := range to.Blocks() {
- _, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk)
- if err != nil {
- return err
- }
-
- for _, smsg := range smsgs {
- if smsg.Signature.Type != crypto.SigTypeDelegated {
- continue
- }
-
- hash, err := EthTxHashFromSignedMessage(ctx, smsg, m.StateAPI)
- if err != nil {
- return err
- }
-
- err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid())
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-type EthTxHashManager struct {
- StateAPI StateAPI
- TransactionHashLookup *ethhashlookup.EthTxHashLookup
-}
-
-func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error {
- return nil
-}
-
-func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error {
- if minHeight < build.UpgradeHyggeHeight {
- minHeight = build.UpgradeHyggeHeight
- }
-
- ts := m.StateAPI.Chain.GetHeaviestTipSet()
- for ts.Height() > minHeight {
- for _, block := range ts.Blocks() {
- msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block)
- if err != nil {
- // If we can't find the messages, we've either imported from snapshot or pruned the store
- log.Debug("exiting message mapping population at epoch ", ts.Height())
- return nil
- }
-
- for _, msg := range msgs {
- m.ProcessSignedMessage(ctx, msg)
- }
- }
-
- var err error
- ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents())
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {
- if msg.Signature.Type != crypto.SigTypeDelegated {
- return
- }
-
- ethTx, err := newEthTxFromSignedMessage(ctx, msg, m.StateAPI)
- if err != nil {
- log.Errorf("error converting filecoin message to eth tx: %s", err)
- return
- }
-
- err = m.TransactionHashLookup.UpsertHash(ethTx.Hash, msg.Cid())
- if err != nil {
- log.Errorf("error inserting tx mapping to db: %s", err)
- return
- }
-}
-
-func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) {
- for {
- select {
- case <-ctx.Done():
- return
- case u := <-ch:
- if u.Type != api.MpoolAdd {
- continue
- }
-
- manager.ProcessSignedMessage(ctx, u.Message)
- }
- }
-}
-
-func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) {
- if retentionDays == 0 {
- return
- }
-
- gcPeriod := 1 * time.Hour
- for {
- entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays)
- if err != nil {
- log.Errorf("error garbage collecting eth transaction hash database: %s", err)
- }
- log.Infof("garbage collection run on eth transaction hash lookup database. %d entries deleted", entriesDeleted)
- time.Sleep(gcPeriod)
- }
-}
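The GC loop above runs hourly via time.Sleep and never observes a context. A standalone sketch of the same loop restructured around time.Ticker so it also honours cancellation (a suggested pattern, not what ships in the code above):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodically runs gc immediately and then on every tick, stopping when
// the context is cancelled; the loop above can only exit with the process.
func runPeriodically(ctx context.Context, interval time.Duration, gc func() (int, error)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if deleted, err := gc(); err != nil {
			fmt.Println("gc error:", err)
		} else {
			fmt.Printf("gc deleted %d entries\n", deleted)
		}
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	runPeriodically(ctx, 100*time.Millisecond, func() (int, error) { return 0, nil })
}
```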
-
-func parseEthTopics(topics ethtypes.EthTopicSpec) (map[string][][]byte, error) {
- keys := map[string][][]byte{}
- for idx, vals := range topics {
- if len(vals) == 0 {
- continue
- }
- // Ethereum topics are emitted using `LOG{0..4}` opcodes resulting in topics1..4
- key := fmt.Sprintf("t%d", idx+1)
- for _, v := range vals {
- v := v // copy the ethhash to avoid repeatedly referencing the same one.
- keys[key] = append(keys[key], v[:])
- }
- }
- return keys, nil
-}
-
-const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
-const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
-// Eth ABI (solidity) panic codes.
-var panicErrorCodes map[uint64]string = map[uint64]string{
- 0x00: "Panic()",
- 0x01: "Assert()",
- 0x11: "ArithmeticOverflow()",
- 0x12: "DivideByZero()",
- 0x21: "InvalidEnumVariant()",
- 0x22: "InvalidStorageArray()",
- 0x31: "PopEmptyArray()",
- 0x32: "ArrayIndexOutOfBounds()",
- 0x41: "OutOfMemory()",
- 0x51: "CalledUninitializedFunction()",
-}
-
-// Parse an ABI encoded revert reason. This reason should be encoded as if it were the parameters to
-// an `Error(string)` function call.
-//
-// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
-func parseEthRevert(ret []byte) string {
- if len(ret) == 0 {
- return "none"
- }
- var cbytes abi.CborBytes
- if err := cbytes.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
- return "ERROR: revert reason is not cbor encoded bytes"
- }
- if len(cbytes) == 0 {
- return "none"
- }
- // If it's not long enough to contain an ABI encoded response, return immediately.
- if len(cbytes) < 4+32 {
- return ethtypes.EthBytes(cbytes).String()
- }
- switch string(cbytes[:4]) {
- case panicFunctionSelector:
- cbytes := cbytes[4 : 4+32]
- // Read and check the code.
- code, err := ethtypes.EthUint64FromBytes(cbytes)
- if err != nil {
- // If it's too big, just return the raw value.
- codeInt := big.PositiveFromUnsignedBytes(cbytes)
- return fmt.Sprintf("Panic(%s)", ethtypes.EthBigInt(codeInt).String())
- }
- if s, ok := panicErrorCodes[uint64(code)]; ok {
- return s
- }
- return fmt.Sprintf("Panic(0x%x)", code)
- case errorFunctionSelector:
- cbytes := cbytes[4:]
- cbytesLen := ethtypes.EthUint64(len(cbytes))
- // Read and check the offset.
- offset, err := ethtypes.EthUint64FromBytes(cbytes[:32])
- if err != nil {
- break
- }
- if cbytesLen < offset {
- break
- }
-
- // Read and check the length.
- if cbytesLen-offset < 32 {
- break
- }
- start := offset + 32
- length, err := ethtypes.EthUint64FromBytes(cbytes[offset : offset+32])
- if err != nil {
- break
- }
- if cbytesLen-start < length {
- break
- }
- // Slice the error message.
- return fmt.Sprintf("Error(%s)", cbytes[start:start+length])
- }
- return ethtypes.EthBytes(cbytes).String()
-}
-
func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRewardSorter) ([]ethtypes.EthBigInt, int64) {
var gasUsedTotal int64
for _, tx := range txGasRewards {
@@ -2586,25 +1595,6 @@ func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRew
return rewards, gasUsedTotal
}
-func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
- smsg, err := cs.GetSignedMessage(ctx, msgCid)
- if err != nil {
- // We couldn't find the signed message, it might be a BLS message, so search for a regular message.
- msg, err := cs.GetMessage(ctx, msgCid)
- if err != nil {
- return nil, xerrors.Errorf("failed to find msg %s: %w", msgCid, err)
- }
- smsg = &types.SignedMessage{
- Message: *msg,
- Signature: crypto.Signature{
- Type: crypto.SigTypeBLS,
- },
- }
- }
-
- return smsg, nil
-}
-
type gasRewardTuple struct {
gasUsed int64
premium abi.TokenAmount
diff --git a/node/impl/full/eth_event.go b/node/impl/full/eth_event.go
new file mode 100644
index 000000000..69021e08a
--- /dev/null
+++ b/node/impl/full/eth_event.go
@@ -0,0 +1,382 @@
+package full
+
+import (
+ "context"
+ "encoding/json"
+ "sync"
+
+ "github.com/google/uuid"
+ "github.com/ipfs/go-cid"
+ "github.com/zyedidia/generic/queue"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ "github.com/filecoin-project/lotus/chain/events/filter"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+)
+
+type filterEventCollector interface {
+ TakeCollectedEvents(context.Context) []*filter.CollectedEvent
+}
+
+type filterMessageCollector interface {
+ TakeCollectedMessages(context.Context) []*types.SignedMessage
+}
+
+type filterTipSetCollector interface {
+ TakeCollectedTipSets(context.Context) []types.TipSetKey
+}
+
+func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
+ var (
+ topicsFound [4]bool
+ topicsFoundCount int
+ dataFound bool
+ )
+ // Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max).
+ topics = make([]ethtypes.EthHash, 0, 4)
+ for _, entry := range entries {
+ // Drop events with non-raw topics to avoid mistakes.
+ if entry.Codec != cid.Raw {
+ log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key)
+ return nil, nil, false
+ }
+ // Check if the key is t1..t4
+ if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
+ // '1' - '1' == 0, etc.
+ idx := int(entry.Key[1] - '1')
+
+ // Drop events with mis-sized topics.
+ if len(entry.Value) != 32 {
+ log.Warnw("got an EVM event topic with an invalid size", "key", entry.Key, "size", len(entry.Value))
+ return nil, nil, false
+ }
+
+ // Drop events with duplicate topics.
+ if topicsFound[idx] {
+ log.Warnw("got a duplicate EVM event topic", "key", entry.Key)
+ return nil, nil, false
+ }
+ topicsFound[idx] = true
+ topicsFoundCount++
+
+ // Extend the topics array
+ for len(topics) <= idx {
+ topics = append(topics, ethtypes.EthHash{})
+ }
+ copy(topics[idx][:], entry.Value)
+ } else if entry.Key == "d" {
+ // Drop events with duplicate data fields.
+ if dataFound {
+ log.Warnw("got duplicate EVM event data")
+ return nil, nil, false
+ }
+
+ dataFound = true
+ data = entry.Value
+ } else {
+ // Skip entries we don't understand (makes it easier to extend things).
+ // But we warn for now because we don't expect them.
+ log.Warnw("unexpected event entry", "key", entry.Key)
+ }
+
+ }
+
+ // Drop events with skipped topics.
+ if len(topics) != topicsFoundCount {
+ log.Warnw("EVM event topic length mismatch", "expected", len(topics), "actual", topicsFoundCount)
+ return nil, nil, false
+ }
+ return data, topics, true
+}
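For reference, a simplified standalone sketch of the mapping above, with a local type standing in for lotus' `types.EventEntry`: keys `t1..t4` become topics by position, `d` becomes the data field, and a gap in the topic sequence rejects the event:

```go
package main

import "fmt"

type entry struct {
	Key   string
	Value []byte
}

// toEthLog mirrors the rules of ethLogFromEvent in simplified form.
func toEthLog(entries []entry) (data []byte, topics [][]byte, ok bool) {
	slots := make([][]byte, 4)
	n := 0
	for _, e := range entries {
		switch {
		case len(e.Key) == 2 && e.Key[0] == 't' && e.Key[1] >= '1' && e.Key[1] <= '4':
			idx := int(e.Key[1] - '1')
			if len(e.Value) != 32 || slots[idx] != nil {
				return nil, nil, false // mis-sized or duplicate topic
			}
			slots[idx] = e.Value
			n++
		case e.Key == "d":
			data = e.Value
		}
	}
	// topics must be contiguous: t3 without t2 is rejected
	for i := 0; i < n; i++ {
		if slots[i] == nil {
			return nil, nil, false
		}
		topics = append(topics, slots[i])
	}
	return data, topics, true
}

func main() {
	t1 := make([]byte, 32)
	t1[31] = 0xaa
	data, topics, ok := toEthLog([]entry{{"t1", t1}, {"d", []byte{0x01}}})
	fmt.Println(ok, len(topics), data) // true 1 [1]
}
```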
+
+func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
+ res := &ethtypes.EthFilterResult{}
+ for _, ev := range evs {
+ log := ethtypes.EthLog{
+ Removed: ev.Reverted,
+ LogIndex: ethtypes.EthUint64(ev.EventIdx),
+ TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
+ BlockNumber: ethtypes.EthUint64(ev.Height),
+ }
+ var (
+ err error
+ ok bool
+ )
+
+ log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
+ if !ok {
+ continue
+ }
+
+ log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ log.TransactionHash, err = ethTxHashFromMessageCid(context.TODO(), ev.MsgCid, sa)
+ if err != nil {
+ return nil, err
+ }
+ c, err := ev.TipSetKey.Cid()
+ if err != nil {
+ return nil, err
+ }
+ log.BlockHash, err = ethtypes.EthHashFromCid(c)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, log)
+ }
+
+ return res, nil
+}
+
+func ethFilterResultFromTipSets(tsks []types.TipSetKey) (*ethtypes.EthFilterResult, error) {
+ res := &ethtypes.EthFilterResult{}
+
+ for _, tsk := range tsks {
+ c, err := tsk.Cid()
+ if err != nil {
+ return nil, err
+ }
+ hash, err := ethtypes.EthHashFromCid(c)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, hash)
+ }
+
+ return res, nil
+}
+
+func ethFilterResultFromMessages(cs []*types.SignedMessage, sa StateAPI) (*ethtypes.EthFilterResult, error) {
+ res := &ethtypes.EthFilterResult{}
+
+ for _, c := range cs {
+ hash, err := ethTxHashFromSignedMessage(context.TODO(), c, sa)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, hash)
+ }
+
+ return res, nil
+}
+
+type EthSubscriptionManager struct {
+ Chain *store.ChainStore
+ StateAPI StateAPI
+ ChainAPI ChainAPI
+ mu sync.Mutex
+ subs map[ethtypes.EthSubscriptionID]*ethSubscription
+}
+
+func (e *EthSubscriptionManager) StartSubscription(ctx context.Context, out ethSubscriptionCallback, dropFilter func(context.Context, filter.Filter) error) (*ethSubscription, error) { // nolint
+ rawid, err := uuid.NewRandom()
+ if err != nil {
+ return nil, xerrors.Errorf("new uuid: %w", err)
+ }
+ id := ethtypes.EthSubscriptionID{}
+ copy(id[:], rawid[:]) // uuid is 16 bytes
+
+ ctx, quit := context.WithCancel(ctx)
+
+ sub := &ethSubscription{
+ Chain: e.Chain,
+ StateAPI: e.StateAPI,
+ ChainAPI: e.ChainAPI,
+ uninstallFilter: dropFilter,
+ id: id,
+ in: make(chan interface{}, 200),
+ out: out,
+ quit: quit,
+
+ toSend: queue.New[[]byte](),
+ sendCond: make(chan struct{}, 1),
+ }
+
+ e.mu.Lock()
+ if e.subs == nil {
+ e.subs = make(map[ethtypes.EthSubscriptionID]*ethSubscription)
+ }
+ e.subs[sub.id] = sub
+ e.mu.Unlock()
+
+ go sub.start(ctx)
+ go sub.startOut(ctx)
+
+ return sub, nil
+}
+
+func (e *EthSubscriptionManager) StopSubscription(ctx context.Context, id ethtypes.EthSubscriptionID) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ sub, ok := e.subs[id]
+ if !ok {
+ return xerrors.Errorf("subscription not found")
+ }
+ sub.stop()
+ delete(e.subs, id)
+
+ return nil
+}
+
+type ethSubscriptionCallback func(context.Context, jsonrpc.RawParams) error
+
+const maxSendQueue = 20000
+
+type ethSubscription struct {
+ Chain *store.ChainStore
+ StateAPI StateAPI
+ ChainAPI ChainAPI
+ uninstallFilter func(context.Context, filter.Filter) error
+ id ethtypes.EthSubscriptionID
+ in chan interface{}
+ out ethSubscriptionCallback
+
+ mu sync.Mutex
+ filters []filter.Filter
+ quit func()
+
+ sendLk sync.Mutex
+ sendQueueLen int
+ toSend *queue.Queue[[]byte]
+ sendCond chan struct{}
+}
+
+func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ f.SetSubChannel(e.in)
+ e.filters = append(e.filters, f)
+}
+
+ // startOut processes the final subscription queue. It's here in case the subscriber
+// is slow, and we need to buffer the messages.
+func (e *ethSubscription) startOut(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-e.sendCond:
+ e.sendLk.Lock()
+
+ for !e.toSend.Empty() {
+ front := e.toSend.Dequeue()
+ e.sendQueueLen--
+
+ e.sendLk.Unlock()
+
+ if err := e.out(ctx, front); err != nil {
+ log.Warnw("error sending subscription response, killing subscription", "sub", e.id, "error", err)
+ e.stop()
+ return
+ }
+
+ e.sendLk.Lock()
+ }
+
+ e.sendLk.Unlock()
+ }
+ }
+}
+
+func (e *ethSubscription) send(ctx context.Context, v interface{}) {
+ resp := ethtypes.EthSubscriptionResponse{
+ SubscriptionID: e.id,
+ Result: v,
+ }
+
+ outParam, err := json.Marshal(resp)
+ if err != nil {
+ log.Warnw("marshaling subscription response", "sub", e.id, "error", err)
+ return
+ }
+
+ e.sendLk.Lock()
+ defer e.sendLk.Unlock()
+
+ e.toSend.Enqueue(outParam)
+
+ e.sendQueueLen++
+ if e.sendQueueLen > maxSendQueue {
+ log.Warnw("subscription send queue full, killing subscription", "sub", e.id)
+ e.stop()
+ return
+ }
+
+ select {
+ case e.sendCond <- struct{}{}:
+ default: // already signalled, and we're holding the lock so we know that the event will be processed
+ }
+}
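The send path above pairs an unbounded queue with a capacity-1 signal channel: the producer enqueues under the lock and does a non-blocking send on `sendCond`, while the consumer drains everything each time the signal fires, delivering outside the lock. A standalone sketch of that pattern using only the standard library:

```go
package main

import (
	"fmt"
	"sync"
)

type notifyQueue struct {
	mu    sync.Mutex
	items [][]byte
	cond  chan struct{} // capacity 1: "there may be work"
}

func newNotifyQueue() *notifyQueue {
	return &notifyQueue{cond: make(chan struct{}, 1)}
}

func (q *notifyQueue) push(v []byte) {
	q.mu.Lock()
	q.items = append(q.items, v)
	q.mu.Unlock()
	select {
	case q.cond <- struct{}{}:
	default: // already signalled; the consumer will see this item anyway
	}
}

func (q *notifyQueue) drain(out func([]byte)) {
	for range q.cond {
		for {
			q.mu.Lock()
			if len(q.items) == 0 {
				q.mu.Unlock()
				break
			}
			v := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			out(v) // deliver outside the lock, as startOut does
		}
	}
}

func main() {
	q := newNotifyQueue()
	done := make(chan struct{})
	go q.drain(func(v []byte) { fmt.Printf("%s\n", v); close(done) })
	q.push([]byte("hello"))
	<-done
}
```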
+
+func (e *ethSubscription) start(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case v := <-e.in:
+ switch vt := v.(type) {
+ case *filter.CollectedEvent:
+ evs, err := ethFilterResultFromEvents([]*filter.CollectedEvent{vt}, e.StateAPI)
+ if err != nil {
+ continue
+ }
+
+ for _, r := range evs.Results {
+ e.send(ctx, r)
+ }
+ case *types.TipSet:
+ ev, err := newEthBlockFromFilecoinTipSet(ctx, vt, true, e.Chain, e.StateAPI)
+ if err != nil {
+ break
+ }
+
+ e.send(ctx, ev)
+ case *types.SignedMessage: // mpool txid
+ evs, err := ethFilterResultFromMessages([]*types.SignedMessage{vt}, e.StateAPI)
+ if err != nil {
+ continue
+ }
+
+ for _, r := range evs.Results {
+ e.send(ctx, r)
+ }
+ default:
+ log.Warnf("unexpected subscription value type: %T", vt)
+ }
+ }
+ }
+}
+
+func (e *ethSubscription) stop() {
+ e.mu.Lock()
+ if e.quit == nil {
+ e.mu.Unlock()
+ return
+ }
+
+ e.quit()
+ e.quit = nil
+ e.mu.Unlock()
+
+ for _, f := range e.filters {
+ // note: the context is actually unused in uninstallFilter
+ if err := e.uninstallFilter(context.TODO(), f); err != nil {
+ // this will leave the filter a zombie, collecting events up to the maximum allowed
+ log.Warnf("failed to remove filter when unsubscribing: %v", err)
+ }
+ }
+}
diff --git a/node/impl/full/eth_trace.go b/node/impl/full/eth_trace.go
new file mode 100644
index 000000000..3766c5448
--- /dev/null
+++ b/node/impl/full/eth_trace.go
@@ -0,0 +1,353 @@
+package full
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/multiformats/go-multicodec"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/builtin"
+ "github.com/filecoin-project/go-state-types/builtin/v10/evm"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
+ builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+)
+
+// decodePayload is a utility function which decodes the payload using the given codec
+func decodePayload(payload []byte, codec uint64) (ethtypes.EthBytes, error) {
+ if len(payload) == 0 {
+ return nil, nil
+ }
+
+ switch multicodec.Code(codec) {
+ case multicodec.Identity:
+ return nil, nil
+ case multicodec.DagCbor, multicodec.Cbor:
+ buf, err := cbg.ReadByteArray(bytes.NewReader(payload), uint64(len(payload)))
+ if err != nil {
+ return nil, xerrors.Errorf("decodePayload: failed to decode cbor payload: %w", err)
+ }
+ return buf, nil
+ case multicodec.Raw:
+ return ethtypes.EthBytes(payload), nil
+ }
+
+ return nil, xerrors.Errorf("decodePayload: unsupported codec: %d", codec)
+}
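As a concrete example of the CBOR branch, a byte-string payload decodes to its raw contents. This sketch hand-encodes the 3-byte string 0x010203 as CBOR (major type 2, length 3, so header byte 0x43) and reads it back with the same cbor-gen helper used above; it is runnable given the cbor-gen dependency this file already imports:

```go
package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	// CBOR byte string: 0x43 = major type 2 (bytes), length 3.
	payload := []byte{0x43, 0x01, 0x02, 0x03}
	buf, err := cbg.ReadByteArray(bytes.NewReader(payload), uint64(len(payload)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf) // 010203
}
```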
+
+// buildTraces recursively builds the traces for a given ExecutionTrace by walking the subcalls
+func buildTraces(ctx context.Context, traces *[]*ethtypes.EthTrace, parent *ethtypes.EthTrace, addr []int, et types.ExecutionTrace, height int64, sa StateAPI) error {
+ // lookup the eth address from the from/to addresses. Note that this may fail but to support
+ // this we need to include the ActorID in the trace. For now, just log a warning and skip
+ // this trace.
+ //
+ // TODO: Add ActorID in trace, see https://github.com/filecoin-project/lotus/pull/11100#discussion_r1302442288
+ from, err := lookupEthAddress(ctx, et.Msg.From, sa)
+ if err != nil {
+ log.Warnf("buildTraces: failed to lookup from address %s: %v", et.Msg.From, err)
+ return nil
+ }
+ to, err := lookupEthAddress(ctx, et.Msg.To, sa)
+ if err != nil {
+ log.Warnf("buildTraces: failed to lookup to address %s: %w", et.Msg.To, err)
+ return nil
+ }
+
+ trace := &ethtypes.EthTrace{
+ Action: ethtypes.EthTraceAction{
+ From: from,
+ To: to,
+ Gas: ethtypes.EthUint64(et.Msg.GasLimit),
+ Input: nil,
+ Value: ethtypes.EthBigInt(et.Msg.Value),
+
+ FilecoinFrom: et.Msg.From,
+ FilecoinTo: et.Msg.To,
+ FilecoinMethod: et.Msg.Method,
+ FilecoinCodeCid: et.Msg.CodeCid,
+ },
+ Result: ethtypes.EthTraceResult{
+ GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas),
+ Output: nil,
+ },
+ Subtraces: 0, // will be updated by the children once they are added to the trace
+ TraceAddress: addr,
+
+ Parent: parent,
+ LastByteCode: nil,
+ }
+
+ trace.SetCallType("call")
+
+ if et.Msg.Method == builtin.MethodsEVM.InvokeContract {
+ log.Debugf("COND1 found InvokeContract call at height: %d", height)
+
+ // TODO: ignore return errors since actors can send gibberish and we don't want
+ // to fail the whole trace in that case
+ trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ } else if et.Msg.To == builtin.EthereumAddressManagerActorAddr &&
+ et.Msg.Method == builtin.MethodsEAM.CreateExternal {
+ log.Debugf("COND2 found CreateExternal call at height: %d", height)
+ trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+
+ if et.MsgRct.ExitCode.IsSuccess() {
+ // ignore return value
+ trace.Result.Output = nil
+ } else {
+ // return value is the error message
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ }
+
+ // treat this as a contract creation
+ trace.SetCallType("create")
+ } else {
+ // we are going to assume a native method, but we may change it in one of the edge cases below
+ // TODO: only do this if we know it's a native method (optimization)
+ trace.Action.Input, err = handleFilecoinMethodInput(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ trace.Result.Output, err = handleFilecoinMethodOutput(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ }
+
+ // TODO: is it OK to check this here or is this only specific to certain edge case (evm to evm)?
+ if et.Msg.ReadOnly {
+ trace.SetCallType("staticcall")
+ }
+
+ // there are several edge cases that require special handling when displaying the traces. Note that while iterating over
+ // the traces we update the trace backwards (through the parent pointer)
+ if parent != nil {
+ // Handle Native actor creation
+ //
+ // Actor A calls to the init actor on method 2 and The init actor creates the target actor B then calls it on method 1
+ if parent.Action.FilecoinTo == builtin.InitActorAddr &&
+ parent.Action.FilecoinMethod == builtin.MethodsInit.Exec &&
+ et.Msg.Method == builtin.MethodConstructor {
+ log.Debugf("COND3 Native actor creation! method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)
+ parent.SetCallType("create")
+ parent.Action.To = to
+ parent.Action.Input = []byte{0xFE}
+ parent.Result.Output = nil
+
+ // there should never be any subcalls when creating a native actor
+ //
+ // TODO: add support for native actors calling another when created
+ return nil
+ }
+
+ // Handle EVM contract creation
+ //
+ // To detect EVM contract creation we need to check for the following sequence of events:
+ //
+ // 1) EVM contract A calls the EAM (Ethereum Address Manager) on method 2 (create) or 3 (create2).
+ // 2) The EAM calls the init actor on method 3 (Exec4).
+ // 3) The init actor creates the target actor B then calls it on method 1.
+ if parent.Parent != nil {
+ calledCreateOnEAM := parent.Parent.Action.FilecoinTo == builtin.EthereumAddressManagerActorAddr &&
+ (parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create || parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create2)
+ eamCalledInitOnExec4 := parent.Action.FilecoinTo == builtin.InitActorAddr &&
+ parent.Action.FilecoinMethod == builtin.MethodsInit.Exec4
+ initCreatedActor := trace.Action.FilecoinMethod == builtin.MethodConstructor
+
+ // TODO: We need to handle failures in contract creations and support resurrections on an existing but dead EVM actor)
+ if calledCreateOnEAM && eamCalledInitOnExec4 && initCreatedActor {
+ log.Debugf("COND4 EVM contract creation method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)
+
+ if parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create {
+ parent.Parent.SetCallType("create")
+ } else {
+ parent.Parent.SetCallType("create2")
+ }
+
+ // rewrite the grandparent (the original create call) to point at the created contract
+ parent.Parent.Action.To = trace.Action.To
+ parent.Parent.Subtraces = 0
+
+ // delete the parent (the EAM) and skip the current trace (init)
+ *traces = (*traces)[:len(*traces)-1]
+
+ return nil
+ }
+ }
+
+ if builtinactors.IsEvmActor(parent.Action.FilecoinCodeCid) {
+ // Handle delegate calls
+ //
+ // 1) Look for trace from an EVM actor to itself on InvokeContractDelegate, method 6.
+ // 2) Check that the previous trace calls another actor on method 3 (GetByteCode) and they are at the same level (same parent)
+ // 3) Treat this as a delegate call to actor A.
+ if parent.LastByteCode != nil && trace.Action.From == trace.Action.To &&
+ trace.Action.FilecoinMethod == builtin.MethodsEVM.InvokeContractDelegate {
+ log.Debugf("COND7 found delegate call, height: %d", height)
+ prev := parent.LastByteCode
+ if prev.Action.From == trace.Action.From && prev.Action.FilecoinMethod == builtin.MethodsEVM.GetBytecode && prev.Parent == trace.Parent {
+ trace.SetCallType("delegatecall")
+ trace.Action.To = prev.Action.To
+
+ var dp evm.DelegateCallParams
+ err := dp.UnmarshalCBOR(bytes.NewReader(et.Msg.Params))
+ if err != nil {
+ return xerrors.Errorf("failed UnmarshalCBOR: %w", err)
+ }
+ trace.Action.Input = dp.Input
+
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("failed decodePayload: %w", err)
+ }
+ }
+ } else {
+ // Handle EVM call special casing
+ //
+ // Any outbound call from an EVM actor on methods 1-1023 are side-effects from EVM instructions
+ // and should be dropped from the trace.
+ if et.Msg.Method > 0 &&
+ et.Msg.Method <= 1023 {
+ log.Debugf("Infof found outbound call from an EVM actor on method 1-1023 method:%d, code:%s, height:%d", et.Msg.Method, parent.Action.FilecoinCodeCid.String(), height)
+
+ if et.Msg.Method == builtin.MethodsEVM.GetBytecode {
+ // save the last bytecode trace to handle delegate calls
+ parent.LastByteCode = trace
+ }
+
+ return nil
+ }
+ }
+ }
+
+ }
+
+ // we are adding this trace to the list, so bump the parent's subtraces count (it was initialized to zero)
+ if parent != nil {
+ parent.Subtraces++
+ }
+
+ *traces = append(*traces, trace)
+
+ for i, call := range et.Subcalls {
+ err := buildTraces(ctx, traces, trace, append(addr, i), call, height, sa)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writePadded(w io.Writer, data any, size int) error {
+ tmp := &bytes.Buffer{}
+
+ // first write data to tmp buffer to get the size
+ err := binary.Write(tmp, binary.BigEndian, data)
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing tmp data to buffer: %w", err)
+ }
+
+ if tmp.Len() > size {
+ return fmt.Errorf("writePadded: data is larger than size")
+ }
+
+ // left-pad with zeros up to size (the padding is written before the value)
+ cnt := size - tmp.Len()
+ for i := 0; i < cnt; i++ {
+ err = binary.Write(w, binary.BigEndian, uint8(0))
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing padding zeros to buffer: %w", err)
+ }
+ }
+
+ // finally write the actual value
+ err = binary.Write(w, binary.BigEndian, tmp.Bytes())
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing data to buffer: %w", err)
+ }
+
+ return nil
+}
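Concretely, padding an 8-byte big-endian uint64 into a 32-byte EVM word produces 24 leading zero bytes followed by the value. A standalone sketch of the same layout writePadded produces:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var w bytes.Buffer
	tmp := new(bytes.Buffer)
	binary.Write(tmp, binary.BigEndian, uint64(5)) // 8 bytes, big-endian

	// left-pad to a 32-byte EVM word, as writePadded does
	w.Write(make([]byte, 32-tmp.Len()))
	w.Write(tmp.Bytes())

	fmt.Printf("%x\n", w.Bytes())
	// 0000000000000000000000000000000000000000000000000000000000000005
}
```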
+
+func handleFilecoinMethodInput(method abi.MethodNum, codec uint64, params []byte) ([]byte, error) {
+ NATIVE_METHOD_SELECTOR := []byte{0x86, 0x8e, 0x10, 0xc4}
+ EVM_WORD_SIZE := 32
+
+ staticArgs := []uint64{
+ uint64(method),
+ codec,
+ uint64(EVM_WORD_SIZE) * 3,
+ uint64(len(params)),
+ }
+ totalWords := len(staticArgs) + (len(params) / EVM_WORD_SIZE)
+ if len(params)%EVM_WORD_SIZE != 0 {
+ totalWords++
+ }
+ totalLen := 4 + totalWords*EVM_WORD_SIZE
+
+ w := &bytes.Buffer{}
+ err := binary.Write(w, binary.BigEndian, NATIVE_METHOD_SELECTOR)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing method selector: %w", err)
+ }
+
+ for _, arg := range staticArgs {
+ err := writePadded(w, arg, 32)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: %w", err)
+ }
+ }
+ err = binary.Write(w, binary.BigEndian, params)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing params: %w", err)
+ }
+ remain := totalLen - w.Len()
+ for i := 0; i < remain; i++ {
+ err = binary.Write(w, binary.BigEndian, uint8(0))
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing tailing zeros: %w", err)
+ }
+ }
+
+ return w.Bytes(), nil
+}
+
+func handleFilecoinMethodOutput(exitCode exitcode.ExitCode, codec uint64, data []byte) ([]byte, error) {
+ w := &bytes.Buffer{}
+
+ values := []interface{}{uint32(exitCode), codec, uint32(w.Len()), uint32(len(data))}
+ for _, v := range values {
+ err := writePadded(w, v, 32)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodOutput: %w", err)
+ }
+ }
+
+ err := binary.Write(w, binary.BigEndian, data)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodOutput: failed writing data: %w", err)
+ }
+
+ return w.Bytes(), nil
+}
diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go
new file mode 100644
index 000000000..17695dd76
--- /dev/null
+++ b/node/impl/full/eth_utils.go
@@ -0,0 +1,695 @@
+package full
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ builtintypes "github.com/filecoin-project/go-state-types/builtin"
+ "github.com/filecoin-project/go-state-types/builtin/v10/eam"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+func getTipsetByBlockNumber(ctx context.Context, chain *store.ChainStore, blkParam string, strict bool) (*types.TipSet, error) {
+ if blkParam == "earliest" {
+ return nil, fmt.Errorf("block param \"earliest\" is not supported")
+ }
+
+ head := chain.GetHeaviestTipSet()
+ switch blkParam {
+ case "pending":
+ return head, nil
+ case "latest":
+ parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get parent tipset")
+ }
+ return parent, nil
+ default:
+ var num ethtypes.EthUint64
+ err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse block number: %v", err)
+ }
+ if abi.ChainEpoch(num) > head.Height()-1 {
+ return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
+ }
+ ts, err := chain.GetTipsetByHeight(ctx, abi.ChainEpoch(num), head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", num)
+ }
+ if strict && ts.Height() != abi.ChainEpoch(num) {
+ return nil, ErrNullRound
+ }
+ return ts, nil
+ }
+}
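The numeric case accepts a hex quantity and resolves it against the canonical chain, returning ErrNullRound in strict mode when the requested epoch is a null round. A standalone sketch of just the hex-quantity parsing (the real path goes through ethtypes.EthUint64):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseBlockParam(p string) (uint64, error) {
	switch p {
	case "earliest", "pending", "latest":
		return 0, fmt.Errorf("predefined param %q handled separately", p)
	}
	return strconv.ParseUint(strings.TrimPrefix(p, "0x"), 16, 64)
}

func main() {
	h, err := parseBlockParam("0x10")
	fmt.Println(h, err) // 16 <nil>
}
```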
+
+func getTipsetByEthBlockNumberOrHash(ctx context.Context, chain *store.ChainStore, blkParam ethtypes.EthBlockNumberOrHash) (*types.TipSet, error) {
+ head := chain.GetHeaviestTipSet()
+
+ predefined := blkParam.PredefinedBlock
+ if predefined != nil {
+ if *predefined == "earliest" {
+ return nil, fmt.Errorf("block param \"earliest\" is not supported")
+ } else if *predefined == "pending" {
+ return head, nil
+ } else if *predefined == "latest" {
+ parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get parent tipset")
+ }
+ return parent, nil
+ } else {
+ return nil, fmt.Errorf("unknown predefined block %s", *predefined)
+ }
+ }
+
+ if blkParam.BlockNumber != nil {
+ height := abi.ChainEpoch(*blkParam.BlockNumber)
+ if height > head.Height()-1 {
+ return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
+ }
+ ts, err := chain.GetTipsetByHeight(ctx, height, head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", height)
+ }
+ return ts, nil
+ }
+
+ if blkParam.BlockHash != nil {
+ ts, err := chain.GetTipSetByCid(ctx, blkParam.BlockHash.ToCid())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset by hash: %v", err)
+ }
+
+ // verify that the tipset is in the canonical chain
+ if blkParam.RequireCanonical {
+ // walk up the current chain (our head) until we reach ts.Height()
+ walkTs, err := chain.GetTipsetByHeight(ctx, ts.Height(), head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", ts.Height())
+ }
+
+ // verify that it equals the expected tipset
+ if !walkTs.Equals(ts) {
+ return nil, fmt.Errorf("tipset is not canonical")
+ }
+ }
+
+ return ts, nil
+ }
+
+ return nil, errors.New("invalid block param")
+}
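Block parameters arrive either as a predefined string, a number, or a hash object; the object form optionally carries a requireCanonical flag. A standalone sketch decoding both object shapes into a local stand-in struct (the field names follow the Ethereum JSON-RPC convention; the real decoding lives on ethtypes.EthBlockNumberOrHash):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// blockParam is a local stand-in mirroring the two accepted shapes.
type blockParam struct {
	BlockNumber      *string `json:"blockNumber,omitempty"`
	BlockHash        *string `json:"blockHash,omitempty"`
	RequireCanonical bool    `json:"requireCanonical,omitempty"`
}

func main() {
	for _, raw := range []string{
		`{"blockNumber":"0x10"}`,
		`{"blockHash":"0xabc...","requireCanonical":true}`,
	} {
		var p blockParam
		if err := json.Unmarshal([]byte(raw), &p); err != nil {
			panic(err)
		}
		fmt.Println(p.BlockNumber != nil, p.BlockHash != nil, p.RequireCanonical)
	}
}
```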
+
+func ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types.Message, error) {
+ var from address.Address
+ if tx.From == nil || *tx.From == (ethtypes.EthAddress{}) {
+ // Send from the filecoin "system" address.
+ var err error
+ from, err = (ethtypes.EthAddress{}).ToFilecoinAddress()
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct the ethereum system address: %w", err)
+ }
+ } else {
+ // The from address must be translatable to an f4 address.
+ var err error
+ from, err = tx.From.ToFilecoinAddress()
+ if err != nil {
+ return nil, fmt.Errorf("failed to translate sender address (%s): %w", tx.From.String(), err)
+ }
+ if p := from.Protocol(); p != address.Delegated {
+ return nil, fmt.Errorf("expected a class 4 address, got: %d: %w", p, err)
+ }
+ }
+
+ var params []byte
+ if len(tx.Data) > 0 {
+ initcode := abi.CborBytes(tx.Data)
+ params2, err := actors.SerializeParams(&initcode)
+ if err != nil {
+ return nil, fmt.Errorf("failed to serialize params: %w", err)
+ }
+ params = params2
+ }
+
+ var to address.Address
+ var method abi.MethodNum
+ if tx.To == nil {
+ // this is a contract creation
+ to = builtintypes.EthereumAddressManagerActorAddr
+ method = builtintypes.MethodsEAM.CreateExternal
+ } else {
+ addr, err := tx.To.ToFilecoinAddress()
+ if err != nil {
+ return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
+ }
+ to = addr
+ method = builtintypes.MethodsEVM.InvokeContract
+ }
+
+ return &types.Message{
+ From: from,
+ To: to,
+ Value: big.Int(tx.Value),
+ Method: method,
+ Params: params,
+ GasLimit: build.BlockGasLimit,
+ GasFeeCap: big.Zero(),
+ GasPremium: big.Zero(),
+ }, nil
+}
+
+func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTxInfo bool, cs *store.ChainStore, sa StateAPI) (ethtypes.EthBlock, error) {
+ parentKeyCid, err := ts.Parents().Cid()
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+ parentBlkHash, err := ethtypes.EthHashFromCid(parentKeyCid)
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+
+ bn := ethtypes.EthUint64(ts.Height())
+
+ blkCid, err := ts.Key().Cid()
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+ blkHash, err := ethtypes.EthHashFromCid(blkCid)
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+
+ msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
+ if err != nil {
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
+ }
+
+ block := ethtypes.NewEthBlock(len(msgs) > 0)
+
+ gasUsed := int64(0)
+ for i, msg := range msgs {
+ rcpt := rcpts[i]
+ ti := ethtypes.EthUint64(i)
+ gasUsed += rcpt.GasUsed
+ var smsg *types.SignedMessage
+ switch msg := msg.(type) {
+ case *types.SignedMessage:
+ smsg = msg
+ case *types.Message:
+ smsg = &types.SignedMessage{
+ Message: *msg,
+ Signature: crypto.Signature{
+ Type: crypto.SigTypeBLS,
+ },
+ }
+ default:
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
+ }
+ tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
+ }
+
+ tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
+ tx.BlockHash = &blkHash
+ tx.BlockNumber = &bn
+ tx.TransactionIndex = &ti
+
+ if fullTxInfo {
+ block.Transactions = append(block.Transactions, tx)
+ } else {
+ block.Transactions = append(block.Transactions, tx.Hash.String())
+ }
+ }
+
+ block.Hash = blkHash
+ block.Number = bn
+ block.ParentHash = parentBlkHash
+ block.Timestamp = ethtypes.EthUint64(ts.Blocks()[0].Timestamp)
+ block.BaseFeePerGas = ethtypes.EthBigInt{Int: ts.Blocks()[0].ParentBaseFee.Int}
+ block.GasUsed = ethtypes.EthUint64(gasUsed)
+ return block, nil
+}
+
+func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
+ msgs, err := cs.MessagesForTipset(ctx, ts)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
+ }
+
+ _, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
+ }
+
+ rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
+ }
+
+ if len(msgs) != len(rcpts) {
+ return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
+ }
+
+ return msgs, rcpts, nil
+}
+
+const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
+const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
+// Eth ABI (solidity) panic codes.
+var panicErrorCodes map[uint64]string = map[uint64]string{
+ 0x00: "Panic()",
+ 0x01: "Assert()",
+ 0x11: "ArithmeticOverflow()",
+ 0x12: "DivideByZero()",
+ 0x21: "InvalidEnumVariant()",
+ 0x22: "InvalidStorageArray()",
+ 0x31: "PopEmptyArray()",
+ 0x32: "ArrayIndexOutOfBounds()",
+ 0x41: "OutOfMemory()",
+ 0x51: "CalledUninitializedFunction()",
+}
+
+// Parse an ABI encoded revert reason. This reason should be encoded as if it were the parameters to
+// an `Error(string)` function call.
+//
+// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
+func parseEthRevert(ret []byte) string {
+ if len(ret) == 0 {
+ return "none"
+ }
+ var cbytes abi.CborBytes
+ if err := cbytes.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
+ return "ERROR: revert reason is not cbor encoded bytes"
+ }
+ if len(cbytes) == 0 {
+ return "none"
+ }
+ // If it's not long enough to contain an ABI encoded response, return immediately.
+ if len(cbytes) < 4+32 {
+ return ethtypes.EthBytes(cbytes).String()
+ }
+ switch string(cbytes[:4]) {
+ case panicFunctionSelector:
+ cbytes := cbytes[4 : 4+32]
+ // Read and check the code.
+ code, err := ethtypes.EthUint64FromBytes(cbytes)
+ if err != nil {
+ // If it's too big, just return the raw value.
+ codeInt := big.PositiveFromUnsignedBytes(cbytes)
+ return fmt.Sprintf("Panic(%s)", ethtypes.EthBigInt(codeInt).String())
+ }
+ if s, ok := panicErrorCodes[uint64(code)]; ok {
+ return s
+ }
+ return fmt.Sprintf("Panic(0x%x)", code)
+ case errorFunctionSelector:
+ cbytes := cbytes[4:]
+ cbytesLen := ethtypes.EthUint64(len(cbytes))
+ // Read and check the offset.
+ offset, err := ethtypes.EthUint64FromBytes(cbytes[:32])
+ if err != nil {
+ break
+ }
+ if cbytesLen < offset {
+ break
+ }
+
+ // Read and check the length.
+ if cbytesLen-offset < 32 {
+ break
+ }
+ start := offset + 32
+ length, err := ethtypes.EthUint64FromBytes(cbytes[offset : offset+32])
+ if err != nil {
+ break
+ }
+ if cbytesLen-start < length {
+ break
+ }
+ // Slice the error message.
+ return fmt.Sprintf("Error(%s)", cbytes[start:start+length])
+ }
+ return ethtypes.EthBytes(cbytes).String()
+}
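For instance, Solidity encodes revert("out of funds") as the 4-byte Error(string) selector 0x08c379a0 followed by a 32-byte offset (0x20), a 32-byte length, and the string bytes. A standalone sketch that builds such a payload and slices it the way parseEthRevert does (minus the outer CBOR wrapping):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func word(v uint64) []byte { // 32-byte big-endian word
	b := make([]byte, 32)
	binary.BigEndian.PutUint64(b[24:], v)
	return b
}

func main() {
	msg := "out of funds"

	// selector for Error(string): 0x08c379a0
	payload := []byte{0x08, 0xc3, 0x79, 0xa0}
	payload = append(payload, word(32)...)               // offset of the string data
	payload = append(payload, word(uint64(len(msg)))...) // string length
	payload = append(payload, msg...)                    // string bytes (unpadded here for brevity)

	body := payload[4:]
	offset := binary.BigEndian.Uint64(body[24:32])
	length := binary.BigEndian.Uint64(body[offset+24 : offset+32])
	start := offset + 32
	fmt.Printf("Error(%s)\n", body[start:start+length]) // Error(out of funds)
}
```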
+
+// lookupEthAddress makes its best effort at finding the Ethereum address for a
+// Filecoin address. It does the following:
+//
+// 1. If the supplied address is an f410 address, we return its payload as the EthAddress.
+ // 2. Otherwise (f0, f1, f2, f3), we look up the actor on the state tree. If it has a delegated address, we return it if it's an f410 address.
+// 3. Otherwise, we fall back to returning a masked ID Ethereum address. If the supplied address is an f0 address, we
+// use that ID to form the masked ID address.
+// 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it.
+func lookupEthAddress(ctx context.Context, addr address.Address, sa StateAPI) (ethtypes.EthAddress, error) {
+ // BLOCK A: We are trying to get an actual Ethereum address from an f410 address.
+ // Attempt to convert directly, if it's an f4 address.
+ ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr)
+ if err == nil && !ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+
+ // Lookup on the target actor and try to get an f410 address.
+ if actor, err := sa.StateGetActor(ctx, addr, types.EmptyTSK); err != nil {
+ return ethtypes.EthAddress{}, err
+ } else if actor.Address != nil {
+ if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+ }
+
+ // BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address.
+ // Check if we already have an ID addr, and use it if possible.
+ if err == nil && ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+
+ // Otherwise, resolve the ID addr.
+ idAddr, err := sa.StateLookupID(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return ethtypes.EthAddress{}, err
+ }
+ return ethtypes.EthAddressFromFilecoinAddress(idAddr)
+}
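A masked ID address, the fallback in steps 3 and 4 above, is a 20-byte Ethereum address with a 0xff prefix, eleven zero bytes, and the actor ID in the trailing 8 bytes (big-endian). A standalone sketch of that layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// maskedIDAddress sketches the masked ID layout: 0xff, 11 zero bytes,
// then the actor ID as a big-endian uint64.
func maskedIDAddress(id uint64) [20]byte {
	var a [20]byte
	a[0] = 0xff
	binary.BigEndian.PutUint64(a[12:], id)
	return a
}

func main() {
	a := maskedIDAddress(1234)
	fmt.Printf("0x%x\n", a) // 0xff… with 1234 (0x4d2) in the trailing 8 bytes
}
```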
+
+func parseEthTopics(topics ethtypes.EthTopicSpec) (map[string][][]byte, error) {
+ keys := map[string][][]byte{}
+ for idx, vals := range topics {
+ if len(vals) == 0 {
+ continue
+ }
+ // Ethereum topics are emitted using `LOG{0..4}` opcodes resulting in topics1..4
+ key := fmt.Sprintf("t%d", idx+1)
+ for _, v := range vals {
+ v := v // copy the ethhash to avoid repeatedly referencing the same one.
+ keys[key] = append(keys[key], v[:])
+ }
+ }
+ return keys, nil
+}
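+
+// Illustrative sketch (not part of this patch): a filter spec constraining
+// topic position 0 to H1 and position 2 to either H2 or H3 produces the keys
+// {"t1": [H1], "t3": [H2, H3]}; unconstrained positions are skipped rather
+// than emitted as empty lists.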
+
+func ethTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethtypes.EthHash, error) {
+ smsg, err := sa.Chain.GetSignedMessage(ctx, c)
+ if err == nil {
+ // This is an Eth tx, a Secp message, or a BLS message in the mpool
+ return ethTxHashFromSignedMessage(ctx, smsg, sa)
+ }
+
+ _, err = sa.Chain.GetMessage(ctx, c)
+ if err == nil {
+ // This is a BLS message
+ return ethtypes.EthHashFromCid(c)
+ }
+
+ return ethtypes.EmptyEthHash, nil
+}
+
+func ethTxHashFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthHash, error) {
+ if smsg.Signature.Type == crypto.SigTypeDelegated {
+ ethTx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EmptyEthHash, err
+ }
+ return ethTx.Hash, nil
+ } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
+ return ethtypes.EthHashFromCid(smsg.Cid())
+ } else { // BLS message
+ return ethtypes.EthHashFromCid(smsg.Message.Cid())
+ }
+}
+
+func newEthTxFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthTx, error) {
+ var tx ethtypes.EthTx
+ var err error
+
+ // This is an eth tx
+ if smsg.Signature.Type == crypto.SigTypeDelegated {
+ tx, err = ethtypes.EthTxFromSignedEthMessage(smsg)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err)
+ }
+
+ tx.Hash, err = tx.TxHash()
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err)
+ }
+
+ fromAddr, err := lookupEthAddress(ctx, smsg.Message.From, sa)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
+ }
+
+ tx.From = fromAddr
+ } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message
+ tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
+ tx.Hash, err = ethtypes.EthHashFromCid(smsg.Cid())
+ if err != nil {
+ return tx, err
+ }
+ } else { // BLS Filecoin message
+ tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
+ tx.Hash, err = ethtypes.EthHashFromCid(smsg.Message.Cid())
+ if err != nil {
+ return tx, err
+ }
+ }
+
+ return tx, nil
+}
+
+// ethTxFromNativeMessage does NOT populate:
+// - BlockHash
+// - BlockNumber
+// - TransactionIndex
+// - Hash
+func ethTxFromNativeMessage(ctx context.Context, msg *types.Message, sa StateAPI) ethtypes.EthTx {
+ // We don't care if we error here; conversion is best-effort for non-eth transactions
+ from, _ := lookupEthAddress(ctx, msg.From, sa)
+ to, _ := lookupEthAddress(ctx, msg.To, sa)
+ return ethtypes.EthTx{
+ To: &to,
+ From: from,
+ Nonce: ethtypes.EthUint64(msg.Nonce),
+ ChainID: ethtypes.EthUint64(build.Eip155ChainId),
+ Value: ethtypes.EthBigInt(msg.Value),
+ Type: ethtypes.Eip1559TxType,
+ Gas: ethtypes.EthUint64(msg.GasLimit),
+ MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap),
+ MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium),
+ AccessList: []ethtypes.EthHash{},
+ }
+}
+
+func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
+ smsg, err := cs.GetSignedMessage(ctx, msgCid)
+ if err != nil {
+ // We couldn't find the signed message, it might be a BLS message, so search for a regular message.
+ msg, err := cs.GetMessage(ctx, msgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to find msg %s: %w", msgCid, err)
+ }
+ smsg = &types.SignedMessage{
+ Message: *msg,
+ Signature: crypto.Signature{
+ Type: crypto.SigTypeBLS,
+ },
+ }
+ }
+
+ return smsg, nil
+}
+
+// newEthTxFromMessageLookup creates an Ethereum transaction from a Filecoin message lookup. If a negative txIdx is
+// passed in, it looks up the transaction index of the message in the tipset; otherwise it uses the provided txIdx.
+func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, txIdx int, cs *store.ChainStore, sa StateAPI) (ethtypes.EthTx, error) {
+ ts, err := cs.LoadTipSet(ctx, msgLookup.TipSet)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ // This tx is located in the parent tipset
+ parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ parentTsCid, err := parentTs.Key().Cid()
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ // lookup the transactionIndex
+ if txIdx < 0 {
+ msgs, err := cs.MessagesForTipset(ctx, parentTs)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+ for i, msg := range msgs {
+ if msg.Cid() == msgLookup.Message {
+ txIdx = i
+ break
+ }
+ }
+ if txIdx < 0 {
+ return ethtypes.EthTx{}, fmt.Errorf("cannot find the msg in the tipset")
+ }
+ }
+
+ blkHash, err := ethtypes.EthHashFromCid(parentTsCid)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ smsg, err := getSignedMessage(ctx, cs, msgLookup.Message)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to get signed msg: %w", err)
+ }
+
+ tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ var (
+ bn = ethtypes.EthUint64(parentTs.Height())
+ ti = ethtypes.EthUint64(txIdx)
+ )
+
+ tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
+ tx.BlockHash = &blkHash
+ tx.BlockNumber = &bn
+ tx.TransactionIndex = &ti
+ return tx, nil
+}
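+
+// Illustrative note (not part of this patch): callers that already know the
+// message's position (e.g. while iterating all messages of a block) pass the
+// index directly; passing txIdx = -1 triggers the linear scan above, at the
+// cost of loading and scanning every message in the parent tipset.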
+
+func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, events []types.Event, cs *store.ChainStore, sa StateAPI) (api.EthTxReceipt, error) {
+ var (
+ transactionIndex ethtypes.EthUint64
+ blockHash ethtypes.EthHash
+ blockNumber ethtypes.EthUint64
+ )
+
+ if tx.TransactionIndex != nil {
+ transactionIndex = *tx.TransactionIndex
+ }
+ if tx.BlockHash != nil {
+ blockHash = *tx.BlockHash
+ }
+ if tx.BlockNumber != nil {
+ blockNumber = *tx.BlockNumber
+ }
+
+ receipt := api.EthTxReceipt{
+ TransactionHash: tx.Hash,
+ From: tx.From,
+ To: tx.To,
+ TransactionIndex: transactionIndex,
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ Type: ethtypes.EthUint64(2),
+ Logs: []ethtypes.EthLog{}, // empty log array is compulsory when no logs, or libraries like ethers.js break
+ LogsBloom: ethtypes.EmptyEthBloom[:],
+ }
+
+ if lookup.Receipt.ExitCode.IsSuccess() {
+ receipt.Status = 1
+ } else {
+ receipt.Status = 0
+ }
+
+ receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)
+
+ // TODO: handle CumulativeGasUsed
+ receipt.CumulativeGasUsed = ethtypes.EmptyEthInt
+
+ // TODO: avoid loading the tipset twice (once here, once when we convert the message to a txn)
+ ts, err := cs.GetTipSetFromKey(ctx, lookup.TipSet)
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
+ }
+
+ // The tx is located in the parent tipset
+ parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err)
+ }
+
+ baseFee := parentTs.Blocks()[0].ParentBaseFee
+ gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
+ totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)
+
+ effectiveGasPrice := big.Zero()
+ if lookup.Receipt.GasUsed > 0 {
+ effectiveGasPrice = big.Div(totalSpent, big.NewInt(lookup.Receipt.GasUsed))
+ }
+ receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)
+
+ if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
+ // Create and Create2 return the same things.
+ var ret eam.CreateExternalReturn
+ if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
+ }
+ addr := ethtypes.EthAddress(ret.EthAddress)
+ receipt.ContractAddress = &addr
+ }
+
+ if len(events) > 0 {
+ receipt.Logs = make([]ethtypes.EthLog, 0, len(events))
+ for i, evt := range events {
+ l := ethtypes.EthLog{
+ Removed: false,
+ LogIndex: ethtypes.EthUint64(i),
+ TransactionHash: tx.Hash,
+ TransactionIndex: transactionIndex,
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ }
+
+ data, topics, ok := ethLogFromEvent(evt.Entries)
+ if !ok {
+ // not an eth event.
+ continue
+ }
+ for _, topic := range topics {
+ ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
+ }
+ l.Data = data
+ l.Topics = topics
+
+ addr, err := address.NewIDAddress(uint64(evt.Emitter))
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to create ID address: %w", err)
+ }
+
+ l.Address, err = lookupEthAddress(ctx, addr, sa)
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
+ }
+
+ ethtypes.EthBloomSet(receipt.LogsBloom, l.Address[:])
+ receipt.Logs = append(receipt.Logs, l)
+ }
+ }
+
+ return receipt, nil
+}
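+
+// Illustrative numbers (not part of this patch): with GasUsed = 1000 and a
+// total spend of BaseFeeBurn + MinerTip + OverEstimationBurn = 2,500,000
+// attoFIL, the receipt above reports EffectiveGasPrice = 2,500,000 / 1,000 =
+// 2,500 attoFIL per gas unit; when GasUsed is 0 the price is reported as 0
+// instead of dividing by zero.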
diff --git a/node/impl/full/state.go b/node/impl/full/state.go
index b392083e1..0e92c8e5b 100644
--- a/node/impl/full/state.go
+++ b/node/impl/full/state.go
@@ -1887,6 +1887,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam
ConsensusMinerMinPower: build.ConsensusMinerMinPower,
SupportedProofTypes: build.SupportedProofTypes,
PreCommitChallengeDelay: build.PreCommitChallengeDelay,
+ Eip155ChainID: build.Eip155ChainId,
ForkUpgradeParams: api.ForkUpgradeParams{
UpgradeSmokeHeight: build.UpgradeSmokeHeight,
UpgradeBreezeHeight: build.UpgradeBreezeHeight,
diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go
new file mode 100644
index 000000000..6757cc6dd
--- /dev/null
+++ b/node/impl/full/txhashmanager.go
@@ -0,0 +1,129 @@
+package full
+
+import (
+ "context"
+ "time"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/ethhashlookup"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type EthTxHashManager struct {
+ StateAPI StateAPI
+ TransactionHashLookup *ethhashlookup.EthTxHashLookup
+}
+
+func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error {
+ return nil
+}
+
+func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error {
+ if minHeight < build.UpgradeHyggeHeight {
+ minHeight = build.UpgradeHyggeHeight
+ }
+
+ ts := m.StateAPI.Chain.GetHeaviestTipSet()
+ for ts.Height() > minHeight {
+ for _, block := range ts.Blocks() {
+ msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block)
+ if err != nil {
+ // If we can't find the messages, we've either imported from snapshot or pruned the store
+ log.Debug("exiting message mapping population at epoch ", ts.Height())
+ return nil
+ }
+
+ for _, msg := range msgs {
+ m.ProcessSignedMessage(ctx, msg)
+ }
+ }
+
+ var err error
+ ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents())
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error {
+ for _, blk := range to.Blocks() {
+ _, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk)
+ if err != nil {
+ return err
+ }
+
+ for _, smsg := range smsgs {
+ if smsg.Signature.Type != crypto.SigTypeDelegated {
+ continue
+ }
+
+ hash, err := ethTxHashFromSignedMessage(ctx, smsg, m.StateAPI)
+ if err != nil {
+ return err
+ }
+
+ err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid())
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {
+ if msg.Signature.Type != crypto.SigTypeDelegated {
+ return
+ }
+
+ ethTx, err := newEthTxFromSignedMessage(ctx, msg, m.StateAPI)
+ if err != nil {
+ log.Errorf("error converting filecoin message to eth tx: %s", err)
+ return
+ }
+
+ err = m.TransactionHashLookup.UpsertHash(ethTx.Hash, msg.Cid())
+ if err != nil {
+ log.Errorf("error inserting tx mapping to db: %s", err)
+ return
+ }
+}
+
+func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case u := <-ch:
+ if u.Type != api.MpoolAdd {
+ continue
+ }
+
+ manager.ProcessSignedMessage(ctx, u.Message)
+ }
+ }
+}
+
+func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) {
+ if retentionDays == 0 {
+ return
+ }
+
+ gcPeriod := 1 * time.Hour
+ for {
+ entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays)
+ if err != nil {
+ log.Errorf("error garbage collecting eth transaction hash database: %s", err)
+ }
+ log.Infof("garbage collection run on eth transaction hash lookup database. %d entries deleted", entriesDeleted)
+ time.Sleep(gcPeriod)
+ }
+}
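+
+// Illustrative usage (not part of this patch): the manager is expected to be
+// wired up once per node, with the mpool watcher and the GC loop running as
+// background goroutines; the channel and retention variables below are
+// assumptions for illustration, not actual Lotus wiring:
+//
+//	go WaitForMpoolUpdates(ctx, mpoolUpdates, manager)
+//	go EthTxHashGC(ctx, retentionDays, manager)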
diff --git a/node/impl/storminer.go b/node/impl/storminer.go
index 6f460eccd..6fd6045b4 100644
--- a/node/impl/storminer.go
+++ b/node/impl/storminer.go
@@ -278,7 +278,16 @@ func (sm *StorageMinerAPI) SectorUnseal(ctx context.Context, sectorNum abi.Secto
ProofType: status.SealProof,
}
- return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+ bgCtx := context.Background()
+
+ go func() {
+ err := sm.StorageMgr.SectorsUnsealPiece(bgCtx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+ if err != nil {
+ log.Errorf("unseal for sector %d failed: %+v", sectorNum, err)
+ }
+ }()
+
+ return nil
}
// List all staged sectors
diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go
index 33a03f844..2b3efce6c 100644
--- a/node/modules/lp2p/pubsub.go
+++ b/node/modules/lp2p/pubsub.go
@@ -559,6 +559,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
}
case pubsub_pb.TraceEvent_PRUNE:
+ stats.Record(context.TODO(), metrics.PubsubPruneMessage.M(1))
if trw.traceMessage(evt.GetPrune().GetTopic()) {
if trw.lp2pTracer != nil {
trw.lp2pTracer.Trace(evt)
diff --git a/node/modules/services.go b/node/modules/services.go
index 9acebd071..f3dd443d9 100644
--- a/node/modules/services.go
+++ b/node/modules/services.go
@@ -11,8 +11,8 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
"go.uber.org/fx"
"golang.org/x/xerrors"
@@ -66,18 +66,22 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.
ctx := helpers.LifecycleCtx(mctx, lc)
go func() {
+ // We want to get information on connected peers; we don't want to trigger new connections.
+ ctx := network.WithNoDial(ctx, "filecoin hello")
for evt := range sub.Out() {
pic := evt.(event.EvtPeerIdentificationCompleted)
+ // We just finished identifying the peer, which means we should know what
+ // protocols it speaks. Check if it speaks the Filecoin hello protocol
+ // before continuing.
+ if p, _ := h.Peerstore().FirstSupportedProtocol(pic.Peer, hello.ProtocolID); p != hello.ProtocolID {
+ continue
+ }
+
go func() {
if err := svc.SayHello(ctx, pic.Peer); err != nil {
protos, _ := h.Peerstore().GetProtocols(pic.Peer)
agent, _ := h.Peerstore().Get(pic.Peer, "AgentVersion")
- if protosContains(protos, hello.ProtocolID) {
- log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
- } else {
- log.Debugw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
- }
- return
+ log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent)
}
}()
}
@@ -85,15 +89,6 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.
return nil
}
-func protosContains(protos []protocol.ID, search protocol.ID) bool {
- for _, p := range protos {
- if p == search {
- return true
- }
- }
- return false
-}
-
func RunPeerMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, pmgr *peermgr.PeerMgr) {
go pmgr.Run(helpers.LifecycleCtx(mctx, lc))
}
@@ -265,13 +260,9 @@ func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconPara
return nil, err
}
- shd := beacon.Schedule{}
- for _, dc := range p.DrandConfig {
- bc, err := drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, dc.Config)
- if err != nil {
- return nil, xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(p.DrandConfig, gen.Timestamp, p.PubSub)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create beacon schedule: %w", err)
}
return shd, nil
diff --git a/scripts/snapshot-summary.py b/scripts/snapshot-summary.py
new file mode 100644
index 000000000..f37623cd2
--- /dev/null
+++ b/scripts/snapshot-summary.py
@@ -0,0 +1,30 @@
+import plotly.express as px
+import sys, json
+import pathlib
+
+snapshot_data = json.load(sys.stdin)
+
+# Possible extensions:
+# 1. parameterize to use block count as value instead of byte size
+# 2. parameterize on different types of px chart types
+# 3. parameterize on output port so we can serve this from infra
+
+parents = []
+names = []
+values = []
+
+for key in snapshot_data:
+ path = pathlib.Path(key)
+ name = key
+ parent = str(path.parent)
+ if key == '/':
+ parent = ''
+ stats = snapshot_data[key]
+ parents.append(parent)
+ names.append(name)
+ values.append(stats['Size'])
+
+data = dict(names=names, parents=parents, values=values)
+fig = px.treemap(data, names='names', parents='parents', values='values')
+fig.show()
+
diff --git a/storage/paths/fetch.go b/storage/paths/fetch.go
index 2d87380bd..6b87c0dd9 100644
--- a/storage/paths/fetch.go
+++ b/storage/paths/fetch.go
@@ -91,7 +91,7 @@ func FetchWithTemp(ctx context.Context, urls []string, dest string, header http.
continue
}
- if err := move(tempDest, dest); err != nil {
+ if err := Move(tempDest, dest); err != nil {
return "", xerrors.Errorf("fetch move error %s -> %s: %w", tempDest, dest, err)
}
diff --git a/storage/paths/index.go b/storage/paths/index.go
index ce11eec9c..8d70fce2a 100644
--- a/storage/paths/index.go
+++ b/storage/paths/index.go
@@ -366,7 +366,7 @@ loop:
if !sid.primary && primary {
sid.primary = true
} else {
- log.Warnf("sector %v redeclared in %s", s, storageID)
+ log.Debugf("sector %v redeclared in %s", s, storageID)
}
continue loop
}
diff --git a/storage/paths/local.go b/storage/paths/local.go
index a866f5bbe..577d4dbe0 100644
--- a/storage/paths/local.go
+++ b/storage/paths/local.go
@@ -548,7 +548,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, exi
}
if best == "" {
- return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector")
+ return storiface.SectorPaths{}, storiface.SectorPaths{}, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("couldn't find a suitable path for a sector"))
}
storiface.SetPathByType(&out, fileType, best)
@@ -720,7 +720,7 @@ func (st *Local) MoveStorage(ctx context.Context, s storiface.SectorRef, types s
return xerrors.Errorf("dropping source sector from index: %w", err)
}
- if err := move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
+ if err := Move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
// TODO: attempt some recovery (check if src is still there, re-declare)
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
}
diff --git a/storage/paths/remote.go b/storage/paths/remote.go
index ab23e9789..0b7563bb2 100644
--- a/storage/paths/remote.go
+++ b/storage/paths/remote.go
@@ -249,7 +249,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType
continue
}
- if err := move(tempDest, dest); err != nil {
+ if err := Move(tempDest, dest); err != nil {
return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err)
}
diff --git a/storage/paths/util_unix.go b/storage/paths/util_unix.go
index f691bad09..8796e601a 100644
--- a/storage/paths/util_unix.go
+++ b/storage/paths/util_unix.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/xerrors"
)
-func move(from, to string) error {
+func Move(from, to string) error {
from, err := homedir.Expand(from)
if err != nil {
return xerrors.Errorf("move: expanding from: %w", err)
diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go
index 7ece009cc..c832f8a14 100644
--- a/storage/pipeline/cbor_gen.go
+++ b/storage/pipeline/cbor_gen.go
@@ -31,7 +31,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
cw := cbg.NewCborWriter(w)
- if _, err := cw.Write([]byte{184, 38}); err != nil {
+ if _, err := cw.Write([]byte{184, 39}); err != nil {
return err
}
@@ -565,6 +565,22 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
}
}
+ // t.PreCommit1Fails (uint64) (uint64)
+ if len("PreCommit1Fails") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"PreCommit1Fails\" was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommit1Fails"))); err != nil {
+ return err
+ }
+ if _, err := cw.WriteString(string("PreCommit1Fails")); err != nil {
+ return err
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PreCommit1Fails)); err != nil {
+ return err
+ }
+
// t.PreCommit2Fails (uint64) (uint64)
if len("PreCommit2Fails") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PreCommit2Fails\" was too long")
@@ -1429,6 +1445,21 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.UpdateUnsealed = &c
}
+ }
+ // t.PreCommit1Fails (uint64) (uint64)
+ case "PreCommit1Fails":
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.PreCommit1Fails = uint64(extra)
+
}
// t.PreCommit2Fails (uint64) (uint64)
case "PreCommit2Fails":
diff --git a/storage/pipeline/fsm_events.go b/storage/pipeline/fsm_events.go
index 122691ca3..a798a884b 100644
--- a/storage/pipeline/fsm_events.go
+++ b/storage/pipeline/fsm_events.go
@@ -182,6 +182,8 @@ func (evt SectorSealPreCommit1Failed) FormatError(xerrors.Printer) (next error)
func (evt SectorSealPreCommit1Failed) apply(si *SectorInfo) {
si.InvalidProofs = 0 // reset counter
si.PreCommit2Fails = 0
+
+ si.PreCommit1Fails++
}
type SectorSealPreCommit2Failed struct{ error }
diff --git a/storage/pipeline/fsm_test.go b/storage/pipeline/fsm_test.go
index 4dfc8548d..7d7201953 100644
--- a/storage/pipeline/fsm_test.go
+++ b/storage/pipeline/fsm_test.go
@@ -1,14 +1,18 @@
package sealing
import (
+ "context"
"testing"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-statemachine"
+
+ "github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func init() {
@@ -451,3 +455,24 @@ func TestCreationTimeCleared(t *testing.T) {
require.NotEqual(t, int64(0), m.state.CreationTime)
}
+
+func TestRetrySoftErr(t *testing.T) {
+ i := 0
+
+ tf := func() error {
+ i++
+ switch i {
+ case 1:
+ return storiface.Err(storiface.ErrTempAllocateSpace, xerrors.New("foo"))
+ case 2:
+ return nil
+ default:
+ t.Fatalf("what")
+ return xerrors.Errorf("this error didn't ever happen, and will never happen")
+ }
+ }
+
+ err := retrySoftErr(context.Background(), tf)
+ require.NoError(t, err)
+ require.Equal(t, 2, i)
+}
diff --git a/storage/pipeline/receive.go b/storage/pipeline/receive.go
index b1b0ce99f..8427eba54 100644
--- a/storage/pipeline/receive.go
+++ b/storage/pipeline/receive.go
@@ -123,7 +123,7 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta
if err := m.maddr.MarshalCBOR(maddrBuf); err != nil {
return SectorInfo{}, xerrors.Errorf("marshal miner address for seed check: %w", err)
}
- rand, err := m.Api.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, meta.SeedEpoch, maddrBuf.Bytes(), ts.Key())
+ rand, err := m.Api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, meta.SeedEpoch, maddrBuf.Bytes(), ts.Key())
if err != nil {
return SectorInfo{}, xerrors.Errorf("generating check seed: %w", err)
}
diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go
index cf9bd4d12..3323c4c9b 100644
--- a/storage/pipeline/states_failed.go
+++ b/storage/pipeline/states_failed.go
@@ -54,7 +54,13 @@ func (m *Sealing) checkPreCommitted(ctx statemachine.Context, sector SectorInfo)
return info, true
}
+var MaxPreCommit1Retries = uint64(3)
+
func (m *Sealing) handleSealPrecommit1Failed(ctx statemachine.Context, sector SectorInfo) error {
+ if sector.PreCommit1Fails > MaxPreCommit1Retries {
+ return ctx.Send(SectorRemove{})
+ }
+
if err := failedCooldown(ctx, sector); err != nil {
return err
}
@@ -301,8 +307,21 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
switch mw.Receipt.ExitCode {
case exitcode.Ok:
- // API error in CcommitWait
- return ctx.Send(SectorRetryCommitWait{})
+ si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSet)
+ if err != nil {
+ // API error
+ if err := failedCooldown(ctx, sector); err != nil {
+ return err
+ }
+
+ return ctx.Send(SectorRetryCommitWait{})
+ }
+ if si != nil {
+ // API error in CommitWait?
+ return ctx.Send(SectorRetryCommitWait{})
+ }
+ // if si == nil, something else went wrong; likely expired deals. We'll
+ // find out in checkCommit
case exitcode.SysErrOutOfGas:
// API error in CommitWait AND gas estimator guessed a wrong number in SubmitCommit
return ctx.Send(SectorRetrySubmitCommit{})
diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go
index 3210109cc..5c91161ef 100644
--- a/storage/pipeline/states_sealing.go
+++ b/storage/pipeline/states_sealing.go
@@ -4,8 +4,10 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"io"
"net/http"
+ "time"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@@ -213,6 +215,42 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e
})
}
+var SoftErrRetryWait = 5 * time.Second
+
+func retrySoftErr(ctx context.Context, cb func() error) error {
+ for {
+ err := cb()
+ if err == nil {
+ return nil
+ }
+
+ var cerr storiface.WorkError
+
+ if errors.As(err, &cerr) {
+ switch cerr.ErrCode() {
+ case storiface.ErrTempWorkerRestart:
+ fallthrough
+ case storiface.ErrTempAllocateSpace:
+ // retry
+ log.Errorw("retrying soft error", "err", err, "code", cerr.ErrCode())
+ default:
+ // non-temp error
+ return err
+ }
+
+ // check if the context got cancelled early
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // retry
+ time.Sleep(SoftErrRetryWait)
+ } else {
+ return err
+ }
+ }
+}
+
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector.SectorNumber, sector.Pieces, m.Api, false); err != nil { // Sanity check state
switch err.(type) {
@@ -269,7 +307,11 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
}
}
- pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
+ var pc1o storiface.PreCommit1Out
+ err = retrySoftErr(ctx.Context(), func() (err error) {
+ pc1o, err = m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
+ return err
+ })
if err != nil {
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
}
@@ -280,7 +322,12 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
}
func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
- cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
+ var cids storiface.SectorCids
+
+ err := retrySoftErr(ctx.Context(), func() (err error) {
+ cids, err = m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
+ return err
+ })
if err != nil {
return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
}
diff --git a/storage/pipeline/types.go b/storage/pipeline/types.go
index 6329b5666..e752eb2b9 100644
--- a/storage/pipeline/types.go
+++ b/storage/pipeline/types.go
@@ -56,6 +56,8 @@ type SectorInfo struct {
TicketEpoch abi.ChainEpoch
PreCommit1Out storiface.PreCommit1Out
+ PreCommit1Fails uint64
+
// PreCommit2
CommD *cid.Cid
CommR *cid.Cid // SectorKey
diff --git a/storage/sealer/ffiwrapper/basicfs/fs.go b/storage/sealer/ffiwrapper/basicfs/fs.go
index 7a9f70d59..4fd8e271f 100644
--- a/storage/sealer/ffiwrapper/basicfs/fs.go
+++ b/storage/sealer/ffiwrapper/basicfs/fs.go
@@ -89,3 +89,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id storiface.SectorRef, ex
return out, done, nil
}
+
+func (b *Provider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return b.AcquireSector(ctx, id, existing, allocate, ptype)
+}
diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go
index 85c80a9c1..fec12f231 100644
--- a/storage/sealer/ffiwrapper/sealer_cgo.go
+++ b/storage/sealer/ffiwrapper/sealer_cgo.go
@@ -10,6 +10,7 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/json"
+ "errors"
"io"
"math/bits"
"os"
@@ -31,9 +32,9 @@ import (
"github.com/filecoin-project/lotus/lib/nullreader"
spaths "github.com/filecoin-project/lotus/storage/paths"
- nr "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
"github.com/filecoin-project/lotus/storage/sealer/fr32"
"github.com/filecoin-project/lotus/storage/sealer/partialfile"
+ "github.com/filecoin-project/lotus/storage/sealer/proofpaths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@@ -192,7 +193,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, exis
defer func() {
closer, ok := origPieceData.(io.Closer)
if !ok {
- log.Warnf("AddPiece: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
+ log.Debugf("AddPiece: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
return
}
if err := closer.Close(); err != nil {
@@ -403,92 +404,190 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err
return pieceCID, werr()
}
-func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storiface.SectorRef, commD cid.Cid, unsealedPath string, randomness abi.SealRandomness) (bool, error) {
- replicaPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing)
+func (sb *Sealer) acquireUpdatePath(ctx context.Context, sector storiface.SectorRef) (string, func(), error) {
+ // copy so that the sector doesn't get removed from a long-term storage path
+ replicaPath, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTUpdate, storiface.FTNone, storiface.PathSealing)
if xerrors.Is(err, storiface.ErrSectorNotFound) {
- return false, nil
+ return "", releaseSector, nil
} else if err != nil {
- return false, xerrors.Errorf("reading updated replica: %w", err)
+ return "", releaseSector, xerrors.Errorf("reading updated replica: %w", err)
}
- defer done()
- sealedPaths, done2, err := sb.AcquireSectorKeyOrRegenerate(ctx, sector, randomness)
+ return replicaPath.Update, releaseSector, nil
+}
+
+func (sb *Sealer) decodeUpdatedReplica(ctx context.Context, sector storiface.SectorRef, commD cid.Cid, updatePath, unsealedPath string, randomness abi.SealRandomness) error {
+ keyPaths, done2, err := sb.acquireSectorKeyOrRegenerate(ctx, sector, randomness)
if err != nil {
- return false, xerrors.Errorf("acquiring sealed sector: %w", err)
+ return xerrors.Errorf("acquiring sealed sector: %w", err)
}
defer done2()
// Sector data stored in replica update
updateProof, err := sector.ProofType.RegisteredUpdateProof()
if err != nil {
- return false, err
+ return err
}
- return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, replicaPath.Update, sealedPaths.Sealed, sealedPaths.Cache, commD)
-}
-
-func (sb *Sealer) AcquireSectorKeyOrRegenerate(ctx context.Context, sector storiface.SectorRef, randomness abi.SealRandomness) (storiface.SectorPaths, func(), error) {
- paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
- if err == nil {
- return paths, done, err
- } else if !xerrors.Is(err, storiface.ErrSectorNotFound) {
- return paths, done, xerrors.Errorf("reading sector key: %w", err)
+ if err := ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, updatePath, keyPaths.Sealed, keyPaths.Cache, commD); err != nil {
+ return xerrors.Errorf("decoding unsealed sector data: %w", err)
}
- // Sector key can't be found, so let's regenerate it
- sectorSize, err := sector.ProofType.SectorSize()
- if err != nil {
- return paths, done, xerrors.Errorf("retrieving sector size: %w", err)
- }
- paddedSize := abi.PaddedPieceSize(sectorSize)
-
- _, err = sb.AddPiece(ctx, sector, nil, paddedSize.Unpadded(), nr.NewNullReader(paddedSize.Unpadded()))
- if err != nil {
- return paths, done, xerrors.Errorf("recomputing empty data: %w", err)
- }
-
- err = sb.RegenerateSectorKey(ctx, sector, randomness, []abi.PieceInfo{{PieceCID: zerocomm.ZeroPieceCommitment(paddedSize.Unpadded()), Size: paddedSize}})
- if err != nil {
- return paths, done, xerrors.Errorf("during pc1: %w", err)
- }
-
- // Sector key should exist now, let's grab the paths
- return sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
-}
-
-func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
maxPieceSize := abi.PaddedPieceSize(ssize)
- // try finding existing
+ pf, err := partialfile.OpenPartialFile(maxPieceSize, unsealedPath)
+ if err != nil {
+ return xerrors.Errorf("opening partial file: %w", err)
+ }
+
+ if err := pf.MarkAllocated(0, maxPieceSize); err != nil {
+ return xerrors.Errorf("marking range allocated: %w", err)
+ }
+
+ if err := pf.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (sb *Sealer) acquireSectorKeyOrRegenerate(ctx context.Context, sector storiface.SectorRef, randomness abi.SealRandomness) (storiface.SectorPaths, func(), error) {
+ // copy so that the files aren't removed from long-term storage
+ paths, done, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathSealing)
+ if err == nil {
+ return paths, done, err
+ } else if !xerrors.Is(err, storiface.ErrSectorNotFound) {
+ return paths, done, xerrors.Errorf("reading sector key: %w", err)
+ }
+
+ sectorSize, err := sector.ProofType.SectorSize()
+ if err != nil {
+ return storiface.SectorPaths{}, nil, xerrors.Errorf("retrieving sector size: %w", err)
+ }
+
+ err = sb.regenerateSectorKey(ctx, sector, randomness, zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(sectorSize).Unpadded()))
+ if err != nil {
+ return storiface.SectorPaths{}, nil, xerrors.Errorf("regenerating sector key: %w", err)
+ }
+
+ // Sector key should exist now, let's grab the paths
+ return sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathSealing)
+}
+
+func (sb *Sealer) regenerateSectorKey(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, keyDataCid cid.Cid) error {
+ paths, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
+ if err != nil {
+ return xerrors.Errorf("acquiring sector paths: %w", err)
+ }
+ defer releaseSector()
+
+ // stat paths.Sealed, make sure it doesn't exist
+ _, err = os.Stat(paths.Sealed)
+ if err == nil {
+ return xerrors.Errorf("sealed file exists before regenerating sector key")
+ }
+ if !os.IsNotExist(err) {
+ return xerrors.Errorf("stat sealed path: %w", err)
+ }
+
+ // prepare SDR params
+ commp, err := commcid.CIDToDataCommitmentV1(keyDataCid)
+ if err != nil {
+ return xerrors.Errorf("computing commP: %w", err)
+ }
+
+ replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp)
+ if err != nil {
+ return xerrors.Errorf("computing replica id: %w", err)
+ }
+
+ // generate new sector key
+ err = ffi.GenerateSDR(
+ sector.ProofType,
+ paths.Cache,
+ replicaID,
+ )
+ if err != nil {
+ return xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
+ }
+
+ // move the last layer (sector key) to the sealed location
+ layerCount, err := proofpaths.SDRLayers(sector.ProofType)
+ if err != nil {
+ return xerrors.Errorf("getting SDR layer count: %w", err)
+ }
+
+ lastLayer := filepath.Join(paths.Cache, proofpaths.LayerFileName(layerCount))
+
+ sealedInCache := filepath.Join(paths.Cache, filepath.Base(paths.Sealed))
+ // rename the last layer to the sealed sector file name within the cache dir,
+ // where the rename is almost guaranteed to stay on one filesystem; the
+ // spaths.Move below handles a possible cross-device move to the sealed path
+ err = os.Rename(lastLayer, sealedInCache)
+ if err != nil {
+ return xerrors.Errorf("renaming last layer: %w", err)
+ }
+
+ err = spaths.Move(sealedInCache, paths.Sealed)
+ if err != nil {
+ return xerrors.Errorf("moving sector key: %w", err)
+ }
+
+ // remove other layer files
+ for i := 1; i < layerCount; i++ {
+ err = os.Remove(filepath.Join(paths.Cache, proofpaths.LayerFileName(i)))
+ if err != nil {
+ return xerrors.Errorf("removing layer file %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
+ // NOTE: This function will copy the sealed/unsealed (and possibly update) files
+ // into sealing storage. Those copies get cleaned up in LocalWorker.UnsealPiece
+ // after this call exits. The resulting unsealed file is moved to long-term
+ // storage as well.
+
+ ssize, err := sector.ProofType.SectorSize()
+ if err != nil {
+ return err
+ }
+ maxPieceSize := abi.PaddedPieceSize(ssize)
+
+ // try finding existing (also move to a sealing path if it's not here)
unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
var pf *partialfile.PartialFile
switch {
case xerrors.Is(err, storiface.ErrSectorNotFound):
+ // allocate if doesn't exist
unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTNone, storiface.FTUnsealed, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err)
}
- defer done()
-
- pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed)
- if err != nil {
- return xerrors.Errorf("create unsealed file: %w", err)
- }
-
case err == nil:
- defer done()
-
- pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed)
- if err != nil {
- return xerrors.Errorf("opening partial file: %w", err)
- }
+ // no-op
default:
return xerrors.Errorf("acquire unsealed sector path (existing): %w", err)
}
+
+ defer done()
+
+ pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed)
+ if err != nil {
+ return xerrors.Errorf("creating partial file: %w", err)
+ }
+ } else {
+ return xerrors.Errorf("opening partial file: %w", err)
+ }
+ }
defer pf.Close() // nolint
allocated, err := pf.Allocated()
@@ -496,6 +595,8 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, o
return xerrors.Errorf("getting bitruns of allocated data: %w", err)
}
+ // figure out if there's anything that needs to be unsealed
+
toUnseal, err := computeUnsealRanges(allocated, offset, size)
if err != nil {
return xerrors.Errorf("computing unseal ranges: %w", err)
@@ -505,21 +606,36 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, o
return nil
}
+ // need to unseal
+
// If piece data stored in updated replica decode whole sector
- decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed, randomness)
+ upd, updDone, err := sb.acquireUpdatePath(ctx, sector)
if err != nil {
- return xerrors.Errorf("decoding sector from replica: %w", err)
- }
- if decoded {
- return pf.MarkAllocated(0, maxPieceSize)
+ return xerrors.Errorf("acquiring update path: %w", err)
}
- // Piece data sealed in sector
- srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathSealing)
+ if upd != "" {
+ defer updDone()
+
+ // decodeUpdatedReplica will modify the unsealed file
+ if err := pf.Close(); err != nil {
+ return err
+ }
+
+ err := sb.decodeUpdatedReplica(ctx, sector, commd, upd, unsealedPath.Unsealed, randomness)
+ if err != nil {
+ return xerrors.Errorf("decoding sector from replica: %w", err)
+ }
+ return nil
+ }
+
+ // Piece data sealed in a regular (non-upgraded) sector
+ // (copy so that the files stay in long-term storage)
+ srcPaths, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquire sealed sector paths: %w", err)
}
- defer srcDone()
+ defer releaseSector()
sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) // nolint:gosec
if err != nil {
@@ -687,51 +803,6 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storif
return true, nil
}
-func (sb *Sealer) RegenerateSectorKey(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) error {
- paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
- if err != nil {
- return xerrors.Errorf("acquiring sector paths: %w", err)
- }
- defer done()
-
- e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
- if err != nil {
- return xerrors.Errorf("ensuring sealed file exists: %w", err)
- }
- if err := e.Close(); err != nil {
- return err
- }
-
- var sum abi.UnpaddedPieceSize
- for _, piece := range pieces {
- sum += piece.Size.Unpadded()
- }
- ssize, err := sector.ProofType.SectorSize()
- if err != nil {
- return err
- }
- ussize := abi.PaddedPieceSize(ssize).Unpadded()
- if sum != ussize {
- return xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
- }
-
- // TODO: context cancellation respect
- _, err = ffi.SealPreCommitPhase1(
- sector.ProofType,
- paths.Cache,
- paths.Unsealed,
- paths.Sealed,
- sector.ID.Number,
- sector.ID.Miner,
- ticket,
- pieces,
- )
- if err != nil {
- return xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
- }
- return nil
-}
-
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storiface.PreCommit1Out, err error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
if err != nil {
diff --git a/storage/sealer/ffiwrapper/sealer_test.go b/storage/sealer/ffiwrapper/sealer_test.go
index 98c562fd2..c0cdb77a9 100644
--- a/storage/sealer/ffiwrapper/sealer_test.go
+++ b/storage/sealer/ffiwrapper/sealer_test.go
@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"io"
+ "io/fs"
"math/rand"
"os"
"path/filepath"
@@ -22,6 +23,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/filecoin-ffi/cgo"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
+ commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
prooftypes "github.com/filecoin-project/go-state-types/proof"
@@ -412,6 +414,16 @@ func TestSealPoStNoCommit(t *testing.T) {
fmt.Printf("EPoSt: %s\n", epost.Sub(precommit).String())
}
+func TestMain(m *testing.M) {
+ //setup()
+ // Here it is no longer bound to 30s but has 1m30s for the whole suite.
+ getGrothParamFileAndVerifyingKeys(sectorSize)
+
+ code := m.Run()
+ //shutdown()
+ os.Exit(code)
+}
+
func TestSealAndVerify3(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
@@ -424,8 +436,6 @@ func TestSealAndVerify3(t *testing.T) {
}
_ = os.Setenv("RUST_LOG", "trace")
- getGrothParamFileAndVerifyingKeys(sectorSize)
-
dir, err := os.MkdirTemp("", "sbtest")
if err != nil {
t.Fatal(err)
@@ -595,12 +605,18 @@ func BenchmarkWriteWithAlignment(b *testing.B) {
}
func openFDs(t *testing.T) int {
- dent, err := os.ReadDir("/proc/self/fd")
- require.NoError(t, err)
+ path := "/proc/self/fd"
+ if runtime.GOOS == "darwin" {
+ path = "/dev/fd"
+ }
+ dent, err := os.ReadDir(path)
+ if err != nil && !strings.Contains(err.Error(), "/dev/fd/3: bad file descriptor") {
+ require.NoError(t, err)
+ }
var skip int
for _, info := range dent {
- l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
+ l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
continue
}
@@ -621,11 +637,15 @@ func requireFDsClosed(t *testing.T, start int) {
openNow := openFDs(t)
if start != openNow {
- dent, err := os.ReadDir("/proc/self/fd")
+ path := "/proc/self/fd"
+ if runtime.GOOS == "darwin" {
+ path = "/dev/fd"
+ }
+ dent, err := os.ReadDir(path)
require.NoError(t, err)
for _, info := range dent {
- l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
+ l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
fmt.Printf("FD err %s\n", err)
continue
@@ -1189,3 +1209,66 @@ func (c *closeAssertReader) Close() error {
}
var _ io.Closer = &closeAssertReader{}
+
+func TestGenerateSDR(t *testing.T) {
+ d := t.TempDir()
+
+ miner := abi.ActorID(123)
+
+ sp := &basicfs.Provider{
+ Root: d,
+ }
+ sb, err := New(sp)
+ require.NoError(t, err)
+
+ si := storiface.SectorRef{
+ ID: abi.SectorID{Miner: miner, Number: 1},
+ ProofType: sealProofType,
+ }
+
+ s := seal{ref: si}
+
+ sz := abi.PaddedPieceSize(sectorSize).Unpadded()
+
+ s.pi, err = sb.AddPiece(context.TODO(), si, []abi.UnpaddedPieceSize{}, sz, nullreader.NewNullReader(sz))
+ require.NoError(t, err)
+
+ s.ticket = sealRand
+
+ _, err = sb.SealPreCommit1(context.TODO(), si, s.ticket, []abi.PieceInfo{s.pi})
+ require.NoError(t, err)
+
+ // sdr for comparison
+
+ sdrCache := filepath.Join(d, "sdrcache")
+
+ commd, err := commcid.CIDToDataCommitmentV1(s.pi.PieceCID)
+ require.NoError(t, err)
+
+ replicaID, err := sealProofType.ReplicaId(si.ID.Miner, si.ID.Number, s.ticket, commd)
+ require.NoError(t, err)
+
+ err = ffi.GenerateSDR(sealProofType, sdrCache, replicaID)
+ require.NoError(t, err)
+
+ // list files in d recursively, for debug
+
+ require.NoError(t, filepath.Walk(d, func(path string, info fs.FileInfo, err error) error {
+ fmt.Println(path)
+ return nil
+ }))
+
+ // compare
+ lastLayerFile := "sc-02-data-layer-2.dat"
+
+ sdrFile := filepath.Join(sdrCache, lastLayerFile)
+ pc1File := filepath.Join(d, "cache/s-t0123-1/", lastLayerFile)
+
+ sdrData, err := os.ReadFile(sdrFile)
+ require.NoError(t, err)
+
+ pc1Data, err := os.ReadFile(pc1File)
+ require.NoError(t, err)
+
+ require.Equal(t, sdrData, pc1Data)
+}
diff --git a/storage/sealer/ffiwrapper/types.go b/storage/sealer/ffiwrapper/types.go
index d20d581db..1c039cd87 100644
--- a/storage/sealer/ffiwrapper/types.go
+++ b/storage/sealer/ffiwrapper/types.go
@@ -11,6 +11,8 @@ type SectorProvider interface {
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
+ // AcquireSector, but acquires a copy so as to preserve the sector's long-term storage location.
+ AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
}
var _ SectorProvider = &basicfs.Provider{}
diff --git a/storage/sealer/manager.go b/storage/sealer/manager.go
index 3f496b7de..700a5aec5 100644
--- a/storage/sealer/manager.go
+++ b/storage/sealer/manager.go
@@ -330,7 +330,7 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
// if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and
// put it in the sealing scratch space.
- sealFetch := PrepareAction{
+ unsealFetch := PrepareAction{
Action: func(ctx context.Context, worker Worker) error {
log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
@@ -359,7 +359,7 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
selector := newExistingSelector(m.index, sector.ID, storiface.FTSealed|storiface.FTCache, true)
log.Debugf("will schedule unseal for sector %d", sector.ID)
- err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, sealFetch, func(ctx context.Context, w Worker) error {
+ err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
// TODO: make restartable
// NOTE: we're unsealing the whole sector here as with SDR we can't really
diff --git a/storage/sealer/manager_test.go b/storage/sealer/manager_test.go
index 99ead1e8e..7c3e1a1f2 100644
--- a/storage/sealer/manager_test.go
+++ b/storage/sealer/manager_test.go
@@ -261,7 +261,7 @@ func TestSnapDeals(t *testing.T) {
// Precommit and Seal a CC sector
fmt.Printf("PC1\n")
- ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
+ ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces)
require.NoError(t, err)
fmt.Printf("PC2\n")
diff --git a/storage/sealer/proofpaths/cachefiles.go b/storage/sealer/proofpaths/cachefiles.go
new file mode 100644
index 000000000..24b29e9f5
--- /dev/null
+++ b/storage/sealer/proofpaths/cachefiles.go
@@ -0,0 +1,30 @@
+package proofpaths
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+var dataFilePrefix = "sc-02-data-"
+
+func LayerFileName(layer int) string {
+ return fmt.Sprintf("%slayer-%d.dat", dataFilePrefix, layer)
+}
+
+func SDRLayers(spt abi.RegisteredSealProof) (int, error) {
+ switch spt {
+ case abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg8MiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1_1, abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg512MiBV1_1, abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1_1, abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep:
+ return 11, nil
+ case abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1_1, abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep:
+ return 11, nil
+ default:
+ return 0, fmt.Errorf("unsupported proof type: %v", spt)
+ }
+}
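+
+// Illustrative sketch (not part of this patch): sector key regeneration uses
+// these helpers to locate the last SDR layer, which for a CC sector is the
+// sector key itself, e.g. for a 32GiB sector:
+//
+//	layers, _ := SDRLayers(abi.RegisteredSealProof_StackedDrg32GiBV1_1) // 11
+//	last := LayerFileName(layers) // "sc-02-data-layer-11.dat"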
diff --git a/storage/sealer/proofpaths/cachefiles_test.go b/storage/sealer/proofpaths/cachefiles_test.go
new file mode 100644
index 000000000..b2c0639c8
--- /dev/null
+++ b/storage/sealer/proofpaths/cachefiles_test.go
@@ -0,0 +1,16 @@
+package proofpaths
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+func TestSDRLayersDefined(t *testing.T) {
+ for proof := range abi.SealProofInfos {
+ _, err := SDRLayers(proof)
+ require.NoError(t, err)
+ }
+}
diff --git a/storage/sealer/roprov.go b/storage/sealer/roprov.go
index c225fda78..bc38efd7a 100644
--- a/storage/sealer/roprov.go
+++ b/storage/sealer/roprov.go
@@ -36,3 +36,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id storiface.Secto
return p, cancel, err
}
+
+func (l *readonlyProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return storiface.SectorPaths{}, nil, xerrors.New("read-only storage")
+}
diff --git a/storage/sealer/storiface/worker.go b/storage/sealer/storiface/worker.go
index 2badad292..e84fd8aa9 100644
--- a/storage/sealer/storiface/worker.go
+++ b/storage/sealer/storiface/worker.go
@@ -186,12 +186,20 @@ const (
ErrTempAllocateSpace
)
+type WorkError interface {
+ ErrCode() ErrorCode
+}
+
type CallError struct {
Code ErrorCode
Message string
sub error
}
+func (c *CallError) ErrCode() ErrorCode {
+ return c.Code
+}
+
func (c *CallError) Error() string {
return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message)
}
@@ -204,6 +212,8 @@ func (c *CallError) Unwrap() error {
return errors.New(c.Message)
}
+var _ WorkError = &CallError{}
+
func Err(code ErrorCode, sub error) *CallError {
return &CallError{
Code: code,
diff --git a/storage/sealer/worker_local.go b/storage/sealer/worker_local.go
index 24b9ff247..cc4a81599 100644
--- a/storage/sealer/worker_local.go
+++ b/storage/sealer/worker_local.go
@@ -180,6 +180,10 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
}, nil
}
+func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
func (l *LocalWorker) ffiExec() (storiface.Storage, error) {
return ffiwrapper.New(&localWorkerPathProvider{w: l})
}
@@ -571,15 +575,16 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storiface.SectorRe
return nil, xerrors.Errorf("unsealing sector: %w", err)
}
- if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil {
- return nil, xerrors.Errorf("removing source data: %w", err)
+ // note: the unsealed file is moved to long-term storage in Manager.SectorsUnsealPiece
+
+ storageTypes := []storiface.SectorFileType{storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache}
+ for _, fileType := range storageTypes {
+ if err = l.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil {
+ return nil, xerrors.Errorf("removing source data: %w", err)
+ }
}
- if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil {
- return nil, xerrors.Errorf("removing source data: %w", err)
- }
-
- log.Debugf("worker has unsealed piece, sector=%+v", sector.ID)
+ log.Debugf("unsealed piece, sector=%+v", sector.ID)
return nil, nil
})