Merge branch 'master' into feat/docs-reorg
commit 6849d18b03
@@ -225,12 +225,17 @@ jobs:
             cd extern/test-vectors
             git fetch
             git checkout origin/<< parameters.vectors-branch >>
-      - run:
-          name: go get vectors branch
-          command: go get github.com/filecoin-project/test-vectors@<< parameters.vectors-branch >>
       - go/install-gotestsum:
           gobin: $HOME/.local/bin
           version: 0.5.2
+      - run:
+          name: install statediff globally
+          command: |
+            ## statediff is optional; we succeed even if compilation fails.
+            mkdir -p /tmp/statediff
+            git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
+            cd /tmp/statediff
+            go install ./cmd/statediff || exit 0
       - run:
           name: go test
           environment:
@@ -249,6 +254,25 @@ jobs:
           path: /tmp/test-reports
       - store_artifacts:
           path: /tmp/test-artifacts/conformance-coverage.html
+
+  build-lotus-soup:
+    description: |
+      Compile `lotus-soup` Testground test plan using the current version of Lotus.
+    parameters:
+      <<: *test-params
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run: cd extern/oni && git submodule sync
+      - run: cd extern/oni && git submodule update --init
+      - run: cd extern/filecoin-ffi && make
+      - run:
+          name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
+          command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
+      - run:
+          name: "build lotus-soup testplan"
+          command: pushd extern/oni/lotus-soup && go build -tags=testground .
+
+
   build-macos:
     description: build darwin lotus binary
@@ -395,7 +419,8 @@ workflows:
   version: 2.1
   ci:
     jobs:
-      - lint-all
+      - lint-all:
+          concurrency: "16"   # expend all docker 2xlarge CPUs.
       - mod-tidy-check
       - gofmt
       - cbor-gen-check
@@ -422,6 +447,7 @@ workflows:
           test-suite-name: conformance-bleeding-edge
           packages: "./conformance"
           vectors-branch: master
+      - build-lotus-soup
      - build-debug
      - build-all:
          requires:
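For context, the four `go mod edit -replace` invocations in the `build-lotus-soup` job above rewrite the test plan's go.mod to point at the local checkouts (for example `github.com/filecoin-project/lotus => ../../../`), so the Testground plan is compiled against the code in this branch rather than against published module versions.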
.github/CODEOWNERS (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
+## filecoin-project/lotus CODEOWNERS
+## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
+##
+## These users or groups will be automatically assigned as reviewers every time
+## a PR is submitted that modifies code in the specified locations.
+##
+## The Lotus repo configuration requires that at least ONE codeowner approves
+## the PR before merging.
+
+### Global owners.
+* @magik6k @whyrusleeping @Kubuxu
+
+### Conformance testing.
+conformance/ @raulk
+extern/test-vectors @raulk
.gitignore (vendored, 2 changed lines)
@@ -10,6 +10,8 @@
 /lotus-fountain
 /lotus-stats
 /lotus-bench
+/lotus-gateway
+/lotus-pcr
 /bench.json
 /lotuspond/front/node_modules
 /lotuspond/front/build
.gitmodules (vendored, 6 changed lines)
@@ -8,3 +8,9 @@
 [submodule "extern/test-vectors"]
     path = extern/test-vectors
     url = https://github.com/filecoin-project/test-vectors.git
+[submodule "extern/fil-blst"]
+    path = extern/fil-blst
+    url = https://github.com/filecoin-project/fil-blst.git
+[submodule "extern/oni"]
+    path = extern/oni
+    url = https://github.com/filecoin-project/oni
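For context: with these entries a fresh clone needs the new submodules initialised (for example with `git submodule update --init`) before `extern/fil-blst` and `extern/oni` are populated; the `build-lotus-soup` CI job above runs exactly that step for `extern/oni`.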
CHANGELOG.md (189 changed lines)
@@ -1,5 +1,194 @@
 # Lotus changelog
+
+# 0.7.0 / 2020-09-10
+
+This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
+
+- Upgrading the Drand network used from the test Drand network to the League of Entropy main drand network. This is the same Drand network that will be used in the Filecoin mainnet.
+- Upgrading to specs-actors v0.9.8, which adds a new method to the Multisig actor.
+
+## Changes
+
+#### Core Lotus
+
+- Fix IsAncestorOf (https://github.com/filecoin-project/lotus/pull/3717)
+- Update to specs-actors v0.9.8 (https://github.com/filecoin-project/lotus/pull/3725)
+- Increase chain throughput by 20% (https://github.com/filecoin-project/lotus/pull/3732)
+- Update to go-libp2p-pubsub `master` (https://github.com/filecoin-project/lotus/pull/3735)
+- Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670)
+- Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590)
+
+#### Storage Miner
+
+- Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720)
+
+#### Message pool
+
+- Relax mpool add strictness checks for local pushes (https://github.com/filecoin-project/lotus/pull/3724)
+
+
+#### Maintenance
+
+- Fix devnets (https://github.com/filecoin-project/lotus/pull/3712)
+- Fix(chainwatch): compare prev miner with cur miner (https://github.com/filecoin-project/lotus/pull/3715)
+- CI: fix statediff build; make optional (https://github.com/filecoin-project/lotus/pull/3729)
+- Feat: Chaos abort (https://github.com/filecoin-project/lotus/pull/3733)
+
+## Contributors
+
+The following contributors had commits go into this release.
+We are grateful for every contribution!
+
+| Contributor   | Commits | Lines ±    |
+|---------------|---------|------------|
+| arajasek      | 28      | +1144/-239 |
+| Kubuxu        | 19      | +452/-261  |
+| whyrusleeping | 13      | +456/-87   |
+| vyzo          | 11      | +318/-20   |
+| raulk         | 10      | +1289/-350 |
+| magik6k       | 6       | +188/-55   |
+| dirkmc        | 3       | +31/-8     |
+| alanshaw      | 3       | +176/-37   |
+| Stebalien     | 2       | +9/-12     |
+| lanzafame     | 1       | +1/-1      |
+| frrist        | 1       | +1/-1      |
+| mishmosh      | 1       | +1/-1      |
+| nonsense      | 1       | +1/-0      |
+
+# 0.6.2 / 2020-09-09
+
+This release introduces some critical fixes to message selection and gas estimation logic. It also adds the ability for nodes to mark a certain tipset as checkpointed, as well as various minor improvements and bugfixes.
+
+## Changes
+
+#### Messagepool
+
+- Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708)
+- Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701)
+- Adjust optimal selection to always try to fill blocks (https://github.com/filecoin-project/lotus/pull/3685)
+- Fix very minor bug in repub baseFeeLowerBound (https://github.com/filecoin-project/lotus/pull/3663)
+- Add an auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3676)
+- Fix mpool optimal selection packing failure (https://github.com/filecoin-project/lotus/pull/3698)
+
+#### Core Lotus
+
+- Don't use latency as initial estimate for blocksync (https://github.com/filecoin-project/lotus/pull/3648)
+- Add niceSleep 1 second when drand errors (https://github.com/filecoin-project/lotus/pull/3664)
+- Fix isChainNearSync check in block validator (https://github.com/filecoin-project/lotus/pull/3650)
+- Add peer to peer manager before fetching the tipset (https://github.com/filecoin-project/lotus/pull/3667)
+- Add StageFetchingMessages to sync status (https://github.com/filecoin-project/lotus/pull/3668)
+- Pass tipset through upgrade logic (https://github.com/filecoin-project/lotus/pull/3673)
+- Allow nodes to mark tipsets as checkpointed (https://github.com/filecoin-project/lotus/pull/3680)
+- Remove hard-coded late-fee in window PoSt (https://github.com/filecoin-project/lotus/pull/3702)
+- Gas: Fix median calc (https://github.com/filecoin-project/lotus/pull/3686)
+
+#### Storage
+
+- Storage manager: bail out with an error if unsealed cid is undefined (https://github.com/filecoin-project/lotus/pull/3655)
+- Storage: return true from Sealer.ReadPiece() on success (https://github.com/filecoin-project/lotus/pull/3657)
+
+#### Maintenance
+
+- Resolve lotus, test-vectors, statediff dependency cycle (https://github.com/filecoin-project/lotus/pull/3688)
+- Paych: add docs on how to use paych status (https://github.com/filecoin-project/lotus/pull/3690)
+- Initial CODEOWNERS (https://github.com/filecoin-project/lotus/pull/3691)
+
+# 0.6.1 / 2020-09-08
+
+This optional release introduces a minor improvement to the sync process, ensuring nodes don't fall behind and then resync.
+
+## Changes
+
+- Update `test-vectors` (https://github.com/filecoin-project/lotus/pull/3645)
+- Revert "only subscribe to pubsub topics once we are synced" (https://github.com/filecoin-project/lotus/pull/3643)
+
+# 0.6.0 / 2020-09-07
+
+This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
+
+- Tweaking of some cryptoecon parameters in specs-actors 0.9.7 (https://github.com/filecoin-project/specs-actors/releases/tag/v0.9.7)
+- Rebalancing FIL distribution to make testnet FIL scarce, which prevents base fee spikes and sets better expectations for mainnet
+
+This release also introduces many improvements to Lotus! Among them are a new version of go-fil-markets that supports non-blocking retrieval, various spam reduction measures in the messagepool and p2p logic, and UX improvements to payment channels, dealmaking, and state inspection.
+
+## Changes
+
+#### Core Lotus and dependencies
+
+- Implement faucet funds reallocation logic (https://github.com/filecoin-project/lotus/pull/3632)
+- Network upgrade: Upgrade to correct fork threshold (https://github.com/filecoin-project/lotus/pull/3628)
+- Update to specs 0.9.7 and markets 0.6.0 (https://github.com/filecoin-project/lotus/pull/3627)
+- Network upgrade: Perform base fee tamping (https://github.com/filecoin-project/lotus/pull/3623)
+- Chain events: if cache best() is nil, return chain head (https://github.com/filecoin-project/lotus/pull/3611)
+- Update to specs actors v0.9.6 (https://github.com/filecoin-project/lotus/pull/3603)
+
+#### Messagepool
+
+- Temporarily allow negative chains (https://github.com/filecoin-project/lotus/pull/3625)
+- Improve publish/republish logic (https://github.com/filecoin-project/lotus/pull/3592)
+- Fix selection bug; priority messages were not included if other chains were negative (https://github.com/filecoin-project/lotus/pull/3580)
+- Add defensive check for minimum GasFeeCap for inclusion within the next 20 blocks (https://github.com/filecoin-project/lotus/pull/3579)
+- Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578)
+- Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552)
+
+#### Payment channels
+
+- Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547)
+- Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523)
+- Rename CLI command from "paych get" to "paych add-funds" (https://github.com/filecoin-project/lotus/pull/3520)
+
+#### Peer-to-peer
+
+- Only subscribe to pubsub topics once we are synced (https://github.com/filecoin-project/lotus/pull/3602)
+- Reduce mpool add failure log spam (https://github.com/filecoin-project/lotus/pull/3562)
+- Republish messages even if the chains have negative performance (https://github.com/filecoin-project/lotus/pull/3557)
+- Adjust gossipsub gossip factor (https://github.com/filecoin-project/lotus/pull/3556)
+- Integrate pubsub Random Early Drop (https://github.com/filecoin-project/lotus/pull/3518)
+
+#### Miscellaneous
+
+- Fix panic in OnDealExpiredSlashed (https://github.com/filecoin-project/lotus/pull/3553)
+- Robustify state manager against holes in actor method numbers (https://github.com/filecoin-project/lotus/pull/3538)
+
+#### UX
+
+- VM: Fix an error message (https://github.com/filecoin-project/lotus/pull/3608)
+- Documentation: Batch replacement, update lotus-storage-miner to lotus-miner (https://github.com/filecoin-project/lotus/pull/3571)
+- CLI: Robust actor lookup (https://github.com/filecoin-project/lotus/pull/3535)
+- Add agent flag to net peers (https://github.com/filecoin-project/lotus/pull/3534)
+- Add watch option to storage-deals list (https://github.com/filecoin-project/lotus/pull/3527)
+
+#### Testing & tooling
+
+- Decommission chain-validation (https://github.com/filecoin-project/lotus/pull/3606)
+- Metrics: add expected height metric (https://github.com/filecoin-project/lotus/pull/3586)
+- PCR: Use current tipset during refund (https://github.com/filecoin-project/lotus/pull/3570)
+- Lotus-shed: Add math command (https://github.com/filecoin-project/lotus/pull/3568)
+- PCR: Add tipset aggregation (https://github.com/filecoin-project/lotus/pull/3565)
+- Fix broken paych tests (https://github.com/filecoin-project/lotus/pull/3551)
+- Make chain export ~1000x times faster (https://github.com/filecoin-project/lotus/pull/3533)
+- Chainwatch: Stop SyncIncomingBlocks from leaking into chainwatch processing; No panics during processing (https://github.com/filecoin-project/lotus/pull/3526)
+- Conformance: various changes (https://github.com/filecoin-project/lotus/pull/3521)
+
+# 0.5.10 / 2020-09-03
+
+This patch includes a crucial fix to the message pool selection logic, strongly disfavouring messages that might cause a miner penalty.
+
+## Changes
+
+- Fix calculation of GasReward in messagepool (https://github.com/filecoin-project/lotus/pull/3528)
+
+# 0.5.9 / 2020-09-03
+
+This patch includes a hotfix to the `GasEstimateFeeCap` method, capping the estimated fee to a reasonable level by default.
+
+## Changes
+
+- Added target height to sync wait (https://github.com/filecoin-project/lotus/pull/3502)
+- Disable codecov annotations (https://github.com/filecoin-project/lotus/pull/3514)
+- Cap fees to reasonable level by default (https://github.com/filecoin-project/lotus/pull/3516)
+- Add APIs and command to inspect bandwidth usage (https://github.com/filecoin-project/lotus/pull/3497)
+- Track expected nonce in mpool, ignore messages with large nonce gaps (https://github.com/filecoin-project/lotus/pull/3450)
 
 # 0.5.8 / 2020-09-02
 
 This patch includes some bugfixes to the sector sealing process, and updates go-fil-markets. It also improves the performance of blocksync, adds a method to export chain state trees, and improves chainwatch.
Makefile (6 changed lines)
@@ -92,6 +92,12 @@ lotus-shed: $(BUILD_DEPS)
 .PHONY: lotus-shed
 BINS+=lotus-shed
 
+lotus-gateway: $(BUILD_DEPS)
+	rm -f lotus-gateway
+	go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
+.PHONY: lotus-gateway
+BINS+=lotus-gateway
+
 build: lotus lotus-miner lotus-worker
 	@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
 an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
@@ -30,6 +30,7 @@ type Common interface {
 	NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
 	NetPubsubScores(context.Context) ([]PubsubScore, error)
 	NetAutoNatStatus(context.Context) (NatInfo, error)
+	NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
 
 	// NetBandwidthStats returns statistics about the nodes total bandwidth
 	// usage and current rate across all peers and protocols.
api/api_full.go (126 changed lines)
@@ -2,23 +2,29 @@ package api
 
 import (
 	"context"
+	"fmt"
 	"time"
+
+	"github.com/filecoin-project/go-state-types/dline"
+
+	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p-core/peer"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-multistore"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/specs-actors/actors/builtin/market"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/filecoin-project/specs-actors/actors/builtin/paych"
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 
 	"github.com/filecoin-project/lotus/chain/types"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
@@ -112,7 +118,8 @@ type FullNode interface {
 	// The exported chain data includes the header chain from the given tipset
 	// back to genesis, the entire genesis state, and the most recent 'nroots'
 	// state trees.
-	ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error)
+	// If oldmsgskip is set, messages from before the requested roots are also not included.
+	ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
 
 	// MethodGroup: Beacon
 	// The Beacon method group contains methods for interacting with the random beacon (DRAND)
@@ -153,10 +160,16 @@ type FullNode interface {
 	// yet synced block headers.
 	SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
 
+	// SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it.
+	SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
+
 	// SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced.
 	// Use with extreme caution.
 	SyncMarkBad(ctx context.Context, bcid cid.Cid) error
 
+	// SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again.
+	SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
+
 	// SyncCheckBad checks if a block was marked as bad, and if it was, returns
 	// the reason.
 	SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
@@ -270,6 +283,9 @@ type FullNode interface {
 	// ClientListTransfers returns the status of all ongoing transfers of data
 	ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
 	ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
+	// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+	// which are stuck due to insufficient funds
+	ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
+
 	// ClientUnimport removes references to the specified file from filestore
 	//ClientUnimport(path string)
@@ -300,12 +316,12 @@ type FullNode interface {
 	// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
 	// If the filterOut boolean is set to true, any sectors in the filter are excluded.
 	// If false, only those sectors in the filter are included.
-	StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error)
+	StateMinerSectors(context.Context, address.Address, *bitfield.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error)
 	// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
 	StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error)
 	// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
 	// and returns the deadline-related calculations.
-	StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
+	StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
 	// StateMinerPower returns the power of the indicated miner
 	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
 	// StateMinerInfo returns info about the indicated miner
@@ -315,11 +331,11 @@ type FullNode interface {
 	// StateMinerPartitions loads miner partitions for the specified miner/deadline
 	StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
 	// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
-	StateMinerFaults(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
+	StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
 	// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
 	StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
 	// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
-	StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
+	StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
 	// StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector
 	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
 	// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
@@ -338,6 +354,8 @@ type FullNode interface {
 	StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*SectorLocation, error)
 	// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
 	StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
+	// StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip
+	StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error)
 	// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
 	// message arrives on chain, and gets to the indicated confidence depth.
 	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
@@ -384,6 +402,9 @@ type FullNode interface {
 
 	// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
 	MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+	// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+	// It takes the following params: <multisig address>, <start epoch>, <end epoch>
+	MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
 	// MsigCreate creates a multisig wallet
 	// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
 	//<initial balance>, <sender address of the create msg>, <gas price>
@@ -400,17 +421,29 @@ type FullNode interface {
 	// It takes the following params: <multisig address>, <proposed message ID>, <recipient address>, <value to transfer>,
 	// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
 	MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+	// MsigAddPropose proposes adding a signer in the multisig
+	// It takes the following params: <multisig address>, <sender address of the propose msg>,
+	// <new signer>, <whether the number of required signers should be increased>
+	MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
+	// MsigAddApprove approves a previously proposed AddSigner message
+	// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
+	// <proposer address>, <new signer>, <whether the number of required signers should be increased>
+	MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
+	// MsigAddCancel cancels a previously proposed AddSigner message
+	// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
+	// <new signer>, <whether the number of required signers should be increased>
+	MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
 	// MsigSwapPropose proposes swapping 2 signers in the multisig
 	// It takes the following params: <multisig address>, <sender address of the propose msg>,
-	// <old signer> <new signer>
+	// <old signer>, <new signer>
 	MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
 	// MsigSwapApprove approves a previously proposed SwapSigner
 	// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
-	// <proposer address>, <old signer> <new signer>
+	// <proposer address>, <old signer>, <new signer>
 	MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
 	// MsigSwapCancel cancels a previously proposed SwapSigner message
 	// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
-	// <old signer> <new signer>
+	// <old signer>, <new signer>
 	MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
 
 	MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
@@ -421,6 +454,8 @@ type FullNode interface {
 
 	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
 	PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
+	PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
+	PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
 	PaychList(context.Context) ([]address.Address, error)
 	PaychStatus(context.Context, address.Address) (*PaychStatus, error)
 	PaychSettle(context.Context, address.Address) (cid.Cid, error)
@@ -429,7 +464,7 @@ type FullNode interface {
 	PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error)
 	PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error
 	PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error)
-	PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*paych.SignedVoucher, error)
+	PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error)
 	PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
 	PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
 	PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
@@ -498,6 +533,17 @@ type MsgLookup struct {
 	Height   abi.ChainEpoch
 }
 
+type MsgGasCost struct {
+	Message            cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
+	GasUsed            abi.TokenAmount
+	BaseFeeBurn        abi.TokenAmount
+	OverEstimationBurn abi.TokenAmount
+	MinerPenalty       abi.TokenAmount
+	MinerTip           abi.TokenAmount
+	Refund             abi.TokenAmount
+	TotalCost          abi.TokenAmount
+}
+
 type BlockMessages struct {
 	BlsMessages   []*types.Message
 	SecpkMessages []*types.SignedMessage
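The new `StateMsgGasCost` method and `MsgGasCost` struct above expose the full gas accounting of an executed message. As a minimal illustrative sketch (not part of this commit), assuming the caller already holds an `api.FullNode` client and the CID of a landed message, a hypothetical helper like `printGasCost` could query and print the breakdown:

```go
package example // illustrative sketch only, not part of this diff

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// printGasCost queries the gas cost breakdown of an executed message.
// Passing types.EmptyTSK lets the node resolve the lookup against its current head.
func printGasCost(ctx context.Context, node api.FullNode, msg cid.Cid) error {
	gc, err := node.StateMsgGasCost(ctx, msg, types.EmptyTSK)
	if err != nil {
		return err
	}
	fmt.Println("gas used:      ", gc.GasUsed)
	fmt.Println("base fee burn: ", gc.BaseFeeBurn)
	fmt.Println("miner penalty: ", gc.MinerPenalty)
	fmt.Println("miner tip:     ", gc.MinerTip)
	fmt.Println("total cost:    ", gc.TotalCost)
	return nil
}
```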
@@ -538,6 +584,28 @@ type ChannelInfo struct {
 	WaitSentinel cid.Cid
 }
 
+type ChannelAvailableFunds struct {
+	// Channel is the address of the channel
+	Channel *address.Address
+	// From is the from address of the channel (channel creator)
+	From address.Address
+	// To is the to address of the channel
+	To address.Address
+	// ConfirmedAmt is the amount of funds that have been confirmed on-chain
+	// for the channel
+	ConfirmedAmt types.BigInt
+	// PendingAmt is the amount of funds that are pending confirmation on-chain
+	PendingAmt types.BigInt
+	// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
+	// confirmation of pending funds
+	PendingWaitSentinel *cid.Cid
+	// QueuedAmt is the amount that is queued up behind a pending request
+	QueuedAmt types.BigInt
+	// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
+	// and in the local datastore
+	VoucherReedeemedAmt types.BigInt
+}
+
 type PaymentInfo struct {
 	Channel      address.Address
 	WaitSentinel cid.Cid
@@ -553,6 +621,16 @@ type VoucherSpec struct {
 	Extra    *paych.ModVerifyParams
 }
 
+// VoucherCreateResult is the response to calling PaychVoucherCreate
+type VoucherCreateResult struct {
+	// Voucher that was created, or nil if there was an error or if there
+	// were insufficient funds in the channel
+	Voucher *paych.SignedVoucher
+	// Shortfall is the additional amount that would be needed in the channel
+	// in order to be able to create the voucher
+	Shortfall types.BigInt
+}
+
 type MinerPower struct {
 	MinerPower power.Claim
 	TotalPower power.Claim
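Because `PaychVoucherCreate` now returns the `VoucherCreateResult` defined above rather than a bare voucher, a caller can distinguish an underfunded channel from a hard error by checking `Shortfall`. A minimal illustrative sketch (not part of this commit), assuming an `api.FullNode` client and an existing channel address, with `createVoucher` being a hypothetical helper name:

```go
package example // illustrative sketch only, not part of this diff

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/actors/builtin/paych"
)

// createVoucher asks the node for a new voucher on lane `lane` worth `amt`.
// A nil Voucher means the channel lacks funds; Shortfall reports how much
// more would be needed before the voucher can be created.
func createVoucher(ctx context.Context, node api.FullNode, ch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) {
	res, err := node.PaychVoucherCreate(ctx, ch, amt, lane)
	if err != nil {
		return nil, err
	}
	if res.Voucher == nil {
		return nil, fmt.Errorf("channel %s is short of funds: shortfall %s", ch, res.Shortfall)
	}
	return res.Voucher, nil
}
```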
@@ -669,8 +747,28 @@ const (
 	StageMessages
 	StageSyncComplete
 	StageSyncErrored
+	StageFetchingMessages
 )
 
+func (v SyncStateStage) String() string {
+	switch v {
+	case StageHeaders:
+		return "header sync"
+	case StagePersistHeaders:
+		return "persisting headers"
+	case StageMessages:
+		return "message sync"
+	case StageSyncComplete:
+		return "complete"
+	case StageSyncErrored:
+		return "error"
+	case StageFetchingMessages:
+		return "fetching messages"
+	default:
+		return fmt.Sprintf("<unknown: %d>", v)
+	}
+}
+
 type MpoolChange int
 
 const (
@@ -704,7 +802,7 @@ type CirculatingSupply struct {
 type MiningBaseInfo struct {
 	MinerPower      types.BigInt
 	NetworkPower    types.BigInt
-	Sectors         []abi.SectorInfo
+	Sectors         []proof.SectorInfo
 	WorkerKey       address.Address
 	SectorSize      abi.SectorSize
 	PrevBeaconEntry types.BeaconEntry
@@ -721,7 +819,7 @@ type BlockTemplate struct {
 	Messages         []*types.SignedMessage
 	Epoch            abi.ChainEpoch
 	Timestamp        uint64
-	WinningPoStProof []abi.PoStProof
+	WinningPoStProof []proof.PoStProof
 }
 
 type DataSize struct {
@@ -11,11 +11,11 @@ import (
 	"github.com/filecoin-project/go-fil-markets/piecestore"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-	"github.com/filecoin-project/specs-actors/actors/abi"
 )
 
 // StorageMiner is a low-level interface to the Filecoin network storage miner node
@@ -73,7 +73,7 @@ type StorageMiner interface {
 	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
 	MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
 	MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
-	MarketGetDealUpdates(ctx context.Context, d cid.Cid) (<-chan storagemarket.MinerDeal, error)
+	MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
 	MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
 	MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
 	MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
@@ -6,10 +6,10 @@ import (
 
 	"github.com/ipfs/go-cid"
 
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/build"
@@ -5,6 +5,8 @@ import (
 	"io"
 	"time"
 
+	"github.com/filecoin-project/go-state-types/dline"
+
 	"github.com/ipfs/go-cid"
 	metrics "github.com/libp2p/go-libp2p-core/metrics"
 	"github.com/libp2p/go-libp2p-core/network"
@@ -12,22 +14,23 @@ import (
 	protocol "github.com/libp2p/go-libp2p-core/protocol"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-fil-markets/piecestore"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-multistore"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/filecoin-project/specs-actors/actors/builtin/paych"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/api"
@@ -55,6 +58,7 @@ type CommonStruct struct {
 		NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"`
 		NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"`
 		NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
+		NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"`
 
 		ID func(context.Context) (peer.ID, error) `perm:"read"`
 		Version func(context.Context) (api.Version, error) `perm:"read"`
@@ -91,7 +95,7 @@ type FullNodeStruct struct {
 		ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
 		ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
 		ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
-		ChainExport func(context.Context, abi.ChainEpoch, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
+		ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
 
 		BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
 
@@ -103,7 +107,9 @@ type FullNodeStruct struct {
 		SyncState func(context.Context) (*api.SyncState, error) `perm:"read"`
 		SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"`
 		SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
+		SyncCheckpoint func(ctx context.Context, key types.TipSetKey) error `perm:"admin"`
 		SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
+		SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
 		SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
 
 		MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"`
@@ -135,36 +141,37 @@ type FullNodeStruct struct {
 		WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
 		WalletDelete func(context.Context, address.Address) error `perm:"write"`
 
 		ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
 		ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
 		ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"`
 		ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
 		ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
 		ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
 		ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
 		ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
 		ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
 		ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"`
 		ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
 		ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
 		ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
 		ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"`
 		ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
 		ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"`
 		ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
 		ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
+		ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"`
 
 		StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
-		StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
+		StateMinerSectors func(context.Context, address.Address, *bitfield.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
 		StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
-		StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) `perm:"read"`
+		StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) `perm:"read"`
 		StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
|
||||||
StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
|
StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
|
||||||
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"`
|
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"`
|
||||||
StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"`
|
StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"`
|
||||||
StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
|
StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
|
||||||
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
|
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
|
||||||
StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
|
StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
|
||||||
StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||||
StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||||
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||||
@ -176,6 +183,7 @@ type FullNodeStruct struct {
|
|||||||
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
|
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
|
||||||
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||||
StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
|
StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
|
||||||
|
StateMsgGasCost func(context.Context, cid.Cid, types.TipSetKey) (*api.MsgGasCost, error) `perm:"read"`
|
||||||
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
|
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
|
||||||
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
|
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
|
||||||
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||||
@ -196,31 +204,37 @@ type FullNodeStruct struct {
|
|||||||
StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
|
StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
|
||||||
|
|
||||||
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||||
|
MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||||
MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
|
MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
|
||||||
|
MsigAddPropose func(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
|
||||||
|
MsigAddApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
|
||||||
|
MsigAddCancel func(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
||||||
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
||||||
|
|
||||||
 	MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`

 	PaychGet                    func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
 	PaychGetWaitReady           func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
+	PaychAvailableFunds         func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+	PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
 	PaychList                   func(context.Context) ([]address.Address, error) `perm:"read"`
 	PaychStatus                 func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
 	PaychSettle                 func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
 	PaychCollect                func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
 	PaychAllocateLane           func(context.Context, address.Address) (uint64, error) `perm:"sign"`
 	PaychNewPayment             func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
 	PaychVoucherCheck           func(context.Context, *paych.SignedVoucher) error `perm:"read"`
 	PaychVoucherCheckValid      func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"`
 	PaychVoucherCheckSpendable  func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"`
 	PaychVoucherAdd             func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
-	PaychVoucherCreate          func(context.Context, address.Address, big.Int, uint64) (*paych.SignedVoucher, error) `perm:"sign"`
+	PaychVoucherCreate          func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
 	PaychVoucherList            func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
 	PaychVoucherSubmit          func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"`
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -240,7 +254,7 @@ type StorageMinerStruct struct {
|
|||||||
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
|
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
|
||||||
MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
|
MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
|
||||||
MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
|
MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
|
||||||
MarketGetDealUpdates func(ctx context.Context, d cid.Cid) (<-chan storagemarket.MinerDeal, error) `perm:"read"`
|
MarketGetDealUpdates func(ctx context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"`
|
||||||
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
|
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
|
||||||
MarketSetAsk func(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
|
MarketSetAsk func(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
|
||||||
MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
|
MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
|
||||||
@ -388,6 +402,10 @@ func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[pro
|
|||||||
return c.Internal.NetBandwidthStatsByProtocol(ctx)
|
return c.Internal.NetBandwidthStatsByProtocol(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
|
||||||
|
return c.Internal.NetAgentVersion(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
// ID implements API.ID
|
// ID implements API.ID
|
||||||
func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) {
|
func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) {
|
||||||
return c.Internal.ID(ctx)
|
return c.Internal.ID(ctx)
|
||||||
@ -487,6 +505,10 @@ func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan
|
|||||||
return c.Internal.ClientDataTransferUpdates(ctx)
|
return c.Internal.ClientDataTransferUpdates(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error {
|
||||||
|
return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
|
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
|
||||||
return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
|
return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
|
||||||
}
|
}
|
||||||
@ -671,8 +693,8 @@ func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey,
|
|||||||
return c.Internal.ChainGetPath(ctx, from, to)
|
return c.Internal.ChainGetPath(ctx, from, to)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error) {
|
func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, iom bool, tsk types.TipSetKey) (<-chan []byte, error) {
|
||||||
return c.Internal.ChainExport(ctx, nroots, tsk)
|
return c.Internal.ChainExport(ctx, nroots, iom, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
|
func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
|
||||||
@ -691,10 +713,18 @@ func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.
|
|||||||
return c.Internal.SyncIncomingBlocks(ctx)
|
return c.Internal.SyncIncomingBlocks(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error {
|
||||||
|
return c.Internal.SyncCheckpoint(ctx, tsk)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
|
func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
|
||||||
return c.Internal.SyncMarkBad(ctx, bcid)
|
return c.Internal.SyncMarkBad(ctx, bcid)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error {
|
||||||
|
return c.Internal.SyncUnmarkBad(ctx, bcid)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
|
func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
|
||||||
return c.Internal.SyncCheckBad(ctx, bcid)
|
return c.Internal.SyncCheckBad(ctx, bcid)
|
||||||
}
|
}
|
||||||
@ -703,7 +733,7 @@ func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkNa
|
|||||||
return c.Internal.StateNetworkName(ctx)
|
return c.Internal.StateNetworkName(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, filter *abi.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
|
func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, filter *bitfield.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
|
||||||
return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk)
|
return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -711,7 +741,7 @@ func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr addre
|
|||||||
return c.Internal.StateMinerActiveSectors(ctx, addr, tsk)
|
return c.Internal.StateMinerActiveSectors(ctx, addr, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
|
func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) {
|
||||||
return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
|
return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -731,7 +761,7 @@ func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Add
|
|||||||
return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
|
return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
|
func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
|
||||||
return c.Internal.StateMinerFaults(ctx, actor, tsk)
|
return c.Internal.StateMinerFaults(ctx, actor, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -739,7 +769,7 @@ func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.Cha
|
|||||||
return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
|
return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
|
func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
|
||||||
return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
|
return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -787,6 +817,10 @@ func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Addres
|
|||||||
return c.Internal.StateReadState(ctx, addr, tsk)
|
return c.Internal.StateReadState(ctx, addr, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) StateMsgGasCost(ctx context.Context, msgc cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) {
|
||||||
|
return c.Internal.StateMsgGasCost(ctx, msgc, tsk)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
|
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
|
||||||
return c.Internal.StateWaitMsg(ctx, msgc, confidence)
|
return c.Internal.StateWaitMsg(ctx, msgc, confidence)
|
||||||
}
|
}
|
||||||
@ -859,6 +893,10 @@ func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.
|
|||||||
return c.Internal.MsigGetAvailableBalance(ctx, a, tsk)
|
return c.Internal.MsigGetAvailableBalance(ctx, a, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, sTsk types.TipSetKey, eTsk types.TipSetKey) (types.BigInt, error) {
|
||||||
|
return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
|
func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
|
||||||
return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp)
|
return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp)
|
||||||
}
|
}
|
||||||
@ -875,6 +913,18 @@ func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, t
|
|||||||
return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
|
return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
return c.Internal.MsigAddPropose(ctx, msig, src, newAdd, inc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
return c.Internal.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
return c.Internal.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
|
func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
|
||||||
return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
|
return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
|
||||||
}
|
}
|
||||||
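The MsigAddPropose/MsigAddApprove/MsigAddCancel trio added above mirrors the existing swap flow, but for adding a signer to a multisig. Below is a minimal sketch of how a caller might drive the first two steps; the helper name and wiring are hypothetical, the trailing bool (`inc`) is presumed to control whether the approval threshold is raised (the diff shows only the parameter, not its meaning), and the lookup of the pending transaction ID is elided.

// Hypothetical helper built on the new MsigAdd* calls; ctx, addresses and txID come from elsewhere.
func addSigner(ctx context.Context, full api.FullNode, msig, proposer, approver, newSigner address.Address, txID uint64) error {
	if _, err := full.MsigAddPropose(ctx, msig, proposer, newSigner, false); err != nil {
		return err
	}
	// Once the proposal lands and its transaction ID is known, a second signer approves it.
	if _, err := full.MsigAddApprove(ctx, msig, approver, txID, proposer, newSigner, false); err != nil {
		return err
	}
	return nil
}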
@ -899,6 +949,14 @@ func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid
|
|||||||
return c.Internal.PaychGetWaitReady(ctx, sentinel)
|
return c.Internal.PaychGetWaitReady(ctx, sentinel)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
|
||||||
|
return c.Internal.PaychAvailableFunds(ctx, ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
|
||||||
|
return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
|
func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
|
||||||
return c.Internal.PaychList(ctx)
|
return c.Internal.PaychList(ctx)
|
||||||
}
|
}
|
||||||
@ -919,7 +977,7 @@ func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Addre
|
|||||||
return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta)
|
return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) {
|
func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*api.VoucherCreateResult, error) {
|
||||||
return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane)
|
return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane)
|
||||||
}
|
}
|
||||||
|
|
||||||
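PaychVoucherCreate now returns an *api.VoucherCreateResult rather than the voucher itself; as the updated payment-channel test later in this diff shows, a nil Voucher together with a Shortfall signals that the channel does not hold enough funds. A minimal caller sketch under that reading (the helper name and wrapping are assumptions, not part of the change):

// Hypothetical wrapper around the new return type; full is any FullNode API client.
func createVoucher(ctx context.Context, full api.FullNode, ch address.Address, amt big.Int, lane uint64) (*paych.SignedVoucher, error) {
	res, err := full.PaychVoucherCreate(ctx, ch, amt, lane)
	if err != nil {
		return nil, err
	}
	if res.Voucher == nil {
		// Not enough funds in the channel; Shortfall reports how much is missing.
		return nil, xerrors.Errorf("insufficient channel funds, short by %s", res.Shortfall)
	}
	return res.Voucher, nil
}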
@ -1087,8 +1145,8 @@ func (c *StorageMinerStruct) MarketListRetrievalDeals(ctx context.Context) ([]re
|
|||||||
return c.Internal.MarketListRetrievalDeals(ctx)
|
return c.Internal.MarketListRetrievalDeals(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *StorageMinerStruct) MarketGetDealUpdates(ctx context.Context, d cid.Cid) (<-chan storagemarket.MinerDeal, error) {
|
func (c *StorageMinerStruct) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) {
|
||||||
return c.Internal.MarketGetDealUpdates(ctx, d)
|
return c.Internal.MarketGetDealUpdates(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) {
|
func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) {
|
||||||
|
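MarketGetDealUpdates loses its per-deal CID parameter and now streams updates for every deal the miner knows about; callers interested in one deal filter on ProposalCid themselves, as the updated waitDealPublished test later in this diff does. A hedged sketch of the new pattern (the helper name is invented for illustration):

// Hypothetical helper: wait until one particular deal reaches the wanted state.
func waitForDealState(ctx context.Context, m api.StorageMiner, deal cid.Cid, want storagemarket.StorageDealStatus) error {
	updates, err := m.MarketGetDealUpdates(ctx) // no deal CID any more: this is a stream of all deal updates
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case di := <-updates:
			if deal.Equals(di.ProposalCid) && di.State == want {
				return nil
			}
		}
	}
}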
@ -6,7 +6,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
abi "github.com/filecoin-project/specs-actors/actors/abi"
|
abi "github.com/filecoin-project/go-state-types/abi"
|
||||||
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
xerrors "golang.org/x/xerrors"
|
xerrors "golang.org/x/xerrors"
|
||||||
|
@ -28,9 +28,9 @@ import (
|
|||||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||||
"github.com/filecoin-project/go-multistore"
|
"github.com/filecoin-project/go-multistore"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/api/apistruct"
|
"github.com/filecoin-project/lotus/api/apistruct"
|
||||||
@ -105,7 +105,7 @@ func init() {
|
|||||||
addExample(network.Connected)
|
addExample(network.Connected)
|
||||||
addExample(dtypes.NetworkName("lotus"))
|
addExample(dtypes.NetworkName("lotus"))
|
||||||
addExample(api.SyncStateStage(1))
|
addExample(api.SyncStateStage(1))
|
||||||
addExample(build.APIVersion)
|
addExample(build.FullAPIVersion)
|
||||||
addExample(api.PCHInbound)
|
addExample(api.PCHInbound)
|
||||||
addExample(time.Minute)
|
addExample(time.Minute)
|
||||||
addExample(datatransfer.TransferID(3))
|
addExample(datatransfer.TransferID(3))
|
||||||
|
@ -7,8 +7,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/miner"
|
"github.com/filecoin-project/lotus/miner"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type BlockMiner struct {
|
type BlockMiner struct {
|
||||||
|
@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/node/impl"
|
"github.com/filecoin-project/lotus/node/impl"
|
||||||
|
@ -20,11 +20,11 @@ import (
|
|||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||||
"github.com/filecoin-project/lotus/miner"
|
"github.com/filecoin-project/lotus/miner"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
dag "github.com/ipfs/go-merkledag"
|
dag "github.com/ipfs/go-merkledag"
|
||||||
dstest "github.com/ipfs/go-merkledag/test"
|
dstest "github.com/ipfs/go-merkledag/test"
|
||||||
unixfile "github.com/ipfs/go-unixfs/file"
|
unixfile "github.com/ipfs/go-unixfs/file"
|
||||||
@ -334,7 +334,7 @@ loop:
|
|||||||
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
|
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
|
||||||
subCtx, cancel := context.WithCancel(ctx)
|
subCtx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
updates, err := miner.MarketGetDealUpdates(subCtx, *deal)
|
updates, err := miner.MarketGetDealUpdates(subCtx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -343,18 +343,20 @@ func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode,
 		case <-ctx.Done():
 			t.Fatal("context timeout")
 		case di := <-updates:
-			switch di.State {
-			case storagemarket.StorageDealProposalRejected:
-				t.Fatal("deal rejected")
-			case storagemarket.StorageDealFailing:
-				t.Fatal("deal failed")
-			case storagemarket.StorageDealError:
-				t.Fatal("deal errored", di.Message)
-			case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
-				fmt.Println("COMPLETE", di)
-				return
+			if deal.Equals(di.ProposalCid) {
+				switch di.State {
+				case storagemarket.StorageDealProposalRejected:
+					t.Fatal("deal rejected")
+				case storagemarket.StorageDealFailing:
+					t.Fatal("deal failed")
+				case storagemarket.StorageDealError:
+					t.Fatal("deal errored", di.Message)
+				case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
+					fmt.Println("COMPLETE", di)
+					return
+				}
+				fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
 			}
-			fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
 		}
 	}
 }
|
@ -12,7 +12,7 @@ import (
|
|||||||
|
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
@ -10,8 +10,8 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
|
|
||||||
@ -96,18 +96,24 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if vouch1.Voucher == nil {
+		t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
+	}
 	vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
 	if err != nil {
 		t.Fatal(err)
 	}
-	delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1, nil, abi.NewTokenAmount(1000))
+	if vouch2.Voucher == nil {
+		t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
+	}
+	delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
 	if err != nil {
 		t.Fatal(err)
 	}
 	if !delta1.Equals(abi.NewTokenAmount(1000)) {
 		t.Fatal("voucher didn't have the right amount")
 	}
-	delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2, nil, abi.NewTokenAmount(1000))
+	delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
 	if err != nil {
 		t.Fatal(err)
 	}
|
@ -65,6 +65,8 @@ func TestApis(t *testing.T, b APIBuilder) {
|
|||||||
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
|
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
|
||||||
|
|
||||||
func (ts *testSuite) testVersion(t *testing.T) {
|
func (ts *testSuite) testVersion(t *testing.T) {
|
||||||
|
build.RunningNodeType = build.NodeFull
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
apis, _ := ts.makeNodes(t, 1, OneMiner)
|
apis, _ := ts.makeNodes(t, 1, OneMiner)
|
||||||
api := apis[0]
|
api := apis[0]
|
||||||
|
@ -5,7 +5,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
@ -12,9 +12,9 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
@ -192,7 +192,7 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
|
|||||||
|
|
||||||
// Drop the partition
|
// Drop the partition
|
||||||
err = parts[0].Sectors.ForEach(func(sid uint64) error {
|
err = parts[0].Sectors.ForEach(func(sid uint64) error {
|
||||||
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(abi.SectorID{
|
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
|
||||||
Miner: abi.ActorID(mid),
|
Miner: abi.ActorID(mid),
|
||||||
Number: abi.SectorNumber(sid),
|
Number: abi.SectorNumber(sid),
|
||||||
}, true)
|
}, true)
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
|
|
||||||
@ -96,7 +96,8 @@ type MessageSendSpec struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var DefaultMessageSendSpec = MessageSendSpec{
|
var DefaultMessageSendSpec = MessageSendSpec{
|
||||||
MaxFee: big.Zero(),
|
// MaxFee of 0.1FIL
|
||||||
|
MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10),
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MessageSendSpec) Get() MessageSendSpec {
|
func (ms *MessageSendSpec) Get() MessageSendSpec {
|
||||||
|
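The default MaxFee moves from zero to one tenth of a FIL. FilecoinPrecision is the number of attoFIL in one FIL (1e18 in Lotus, stated here as an assumption since the constant is not shown in this diff), so the new default works out to 1e17 attoFIL. A tiny standalone check of that arithmetic:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	// Assumed: build.FilecoinPrecision == 1e18 attoFIL per FIL (not shown in this diff).
	const filecoinPrecision = 1_000_000_000_000_000_000
	maxFee := abi.NewTokenAmount(int64(filecoinPrecision) / 10)
	fmt.Println(maxFee) // 100000000000000000 attoFIL == 0.1 FIL
}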
@ -4,7 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SignFunc = func(context.Context, []byte) (*crypto.Signature, error)
|
type SignFunc = func(context.Context, []byte) (*crypto.Signature, error)
|
||||||
|
@ -1,15 +1,26 @@
 package build

-import "github.com/filecoin-project/lotus/node/modules/dtypes"
-
-var DrandNetwork = DrandIncentinet
-
-func DrandConfig() dtypes.DrandConfig {
-	return DrandConfigs[DrandNetwork]
-}
+import (
+	"sort"
+
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
+)

 type DrandEnum int

+func DrandConfigSchedule() dtypes.DrandSchedule {
+	out := dtypes.DrandSchedule{}
+	for start, config := range DrandSchedule {
+		out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]})
+	}
+
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Start < out[j].Start
+	})
+
+	return out
+}
+
 const (
 	DrandMainnet DrandEnum = iota + 1
 	DrandTestnet
|
@ -3,13 +3,22 @@
|
|||||||
package build
|
package build
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const UpgradeBreezeHeight = -1
|
||||||
|
const BreezeGasTampingDuration = 0
|
||||||
|
|
||||||
|
const UpgradeSmokeHeight = -1
|
||||||
|
|
||||||
|
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||||
|
0: DrandMainnet,
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
power.ConsensusMinerMinPower = big.NewInt(2048)
|
power.ConsensusMinerMinPower = big.NewInt(2048)
|
||||||
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
||||||
|
@ -5,7 +5,7 @@ import (
|
|||||||
|
|
||||||
"github.com/libp2p/go-libp2p-core/protocol"
|
"github.com/libp2p/go-libp2p-core/protocol"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
@ -36,3 +36,11 @@ func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + st
|
|||||||
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
|
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
|
||||||
return protocol.ID("/fil/kad/" + string(netName))
|
return protocol.ID("/fil/kad/" + string(netName))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func UseNewestNetwork() bool {
|
||||||
|
// TODO: Put these in a container we can iterate over
|
||||||
|
if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
@ -5,7 +5,9 @@ package build
|
|||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
)
|
)
|
||||||
@ -20,6 +22,7 @@ const UnixfsLinksPerLevel = 1024
|
|||||||
// Consensus / Network
|
// Consensus / Network
|
||||||
|
|
||||||
const AllowableClockDriftSecs = uint64(1)
|
const AllowableClockDriftSecs = uint64(1)
|
||||||
|
const NewestNetworkVersion = network.Version2
|
||||||
|
|
||||||
// Epochs
|
// Epochs
|
||||||
const ForkLengthThreshold = Finality
|
const ForkLengthThreshold = Finality
|
||||||
|
@ -10,7 +10,8 @@ package build
|
|||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
)
|
)
|
||||||
@ -69,4 +70,15 @@ var (
|
|||||||
|
|
||||||
PackingEfficiencyNum int64 = 4
|
PackingEfficiencyNum int64 = 4
|
||||||
PackingEfficiencyDenom int64 = 5
|
PackingEfficiencyDenom int64 = 5
|
||||||
|
|
||||||
|
UpgradeBreezeHeight abi.ChainEpoch = -1
|
||||||
|
BreezeGasTampingDuration abi.ChainEpoch = 0
|
||||||
|
|
||||||
|
UpgradeSmokeHeight abi.ChainEpoch = -1
|
||||||
|
|
||||||
|
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||||
|
0: DrandMainnet,
|
||||||
|
}
|
||||||
|
|
||||||
|
NewestNetworkVersion = network.Version2
|
||||||
)
|
)
|
||||||
|
@ -5,13 +5,23 @@
|
|||||||
package build
|
package build
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||||
|
0: DrandIncentinet,
|
||||||
|
UpgradeSmokeHeight: DrandMainnet,
|
||||||
|
}
|
||||||
|
|
||||||
|
const UpgradeBreezeHeight = 41280
|
||||||
|
const BreezeGasTampingDuration = 120
|
||||||
|
|
||||||
|
const UpgradeSmokeHeight = 51000
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
power.ConsensusMinerMinPower = big.NewInt(10 << 40)
|
power.ConsensusMinerMinPower = big.NewInt(10 << 40)
|
||||||
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
||||||
|
@ -1,6 +1,10 @@
|
|||||||
package build
|
package build
|
||||||
|
|
||||||
import "fmt"
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
)
|
||||||
|
|
||||||
var CurrentCommit string
|
var CurrentCommit string
|
||||||
var BuildType int
|
var BuildType int
|
||||||
@ -25,7 +29,7 @@ func buildType() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BuildVersion is the local build version, set by build system
|
// BuildVersion is the local build version, set by build system
|
||||||
const BuildVersion = "0.5.8"
|
const BuildVersion = "0.7.0"
|
||||||
|
|
||||||
func UserVersion() string {
|
func UserVersion() string {
|
||||||
return BuildVersion + buildType() + CurrentCommit
|
return BuildVersion + buildType() + CurrentCommit
|
||||||
@ -52,8 +56,37 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
 	return ve&minorMask == v2&minorMask
 }

-// APIVersion is a semver version of the rpc api exposed
-var APIVersion Version = newVer(0, 14, 0)
+type NodeType int
+
+const (
+	NodeUnknown NodeType = iota
+
+	NodeFull
+	NodeMiner
+	NodeWorker
+)
+
+var RunningNodeType NodeType
+
+func VersionForType(nodeType NodeType) (Version, error) {
+	switch nodeType {
+	case NodeFull:
+		return FullAPIVersion, nil
+	case NodeMiner:
+		return MinerAPIVersion, nil
+	case NodeWorker:
+		return WorkerAPIVersion, nil
+	default:
+		return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
+	}
+}
+
+// semver versions of the rpc api exposed
+var (
+	FullAPIVersion   = newVer(0, 14, 0)
+	MinerAPIVersion  = newVer(0, 14, 0)
+	WorkerAPIVersion = newVer(0, 14, 0)
+)

 //nolint:varcheck,deadcode
 const (
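With the split into FullAPIVersion, MinerAPIVersion and WorkerAPIVersion, a process records what it is in RunningNodeType (the updated testVersion helper earlier in this diff does exactly that) and looks up the version it should advertise through VersionForType. A minimal sketch of that lookup, assuming only the build package shown here and a hypothetical caller:

// Sketch: pick the API version this process should advertise during the RPC handshake.
build.RunningNodeType = build.NodeFull

v, err := build.VersionForType(build.RunningNodeType)
if err != nil {
	panic(err) // only possible for an unrecognised NodeType
}
fmt.Printf("advertised API version: %v\n", v)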
@ -3,7 +3,7 @@ package aerrors
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -3,8 +3,8 @@ package aerrors_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
. "github.com/filecoin-project/lotus/chain/actors/aerrors"
|
. "github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
@ -56,6 +56,10 @@ func (bts *BadBlockCache) Add(c cid.Cid, bbr BadBlockReason) {
|
|||||||
bts.badBlocks.Add(c, bbr)
|
bts.badBlocks.Add(c, bbr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (bts *BadBlockCache) Remove(c cid.Cid) {
|
||||||
|
bts.badBlocks.Remove(c)
|
||||||
|
}
|
||||||
|
|
||||||
func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) {
|
func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) {
|
||||||
rval, ok := bts.badBlocks.Get(c)
|
rval, ok := bts.badBlocks.Get(c)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -3,7 +3,7 @@ package beacon
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
logging "github.com/ipfs/go-log"
|
logging "github.com/ipfs/go-log"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
@ -18,6 +18,23 @@ type Response struct {
|
|||||||
Err error
|
Err error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Schedule []BeaconPoint
|
||||||
|
|
||||||
|
func (bs Schedule) BeaconForEpoch(e abi.ChainEpoch) RandomBeacon {
|
||||||
|
for i := len(bs) - 1; i >= 0; i-- {
|
||||||
|
bp := bs[i]
|
||||||
|
if e >= bp.Start {
|
||||||
|
return bp.Beacon
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bs[0].Beacon
|
||||||
|
}
|
||||||
|
|
||||||
|
type BeaconPoint struct {
|
||||||
|
Start abi.ChainEpoch
|
||||||
|
Beacon RandomBeacon
|
||||||
|
}
|
||||||
|
|
||||||
// RandomBeacon represents a system that provides randomness to Lotus.
|
// RandomBeacon represents a system that provides randomness to Lotus.
|
||||||
// Other components interrogate the RandomBeacon to acquire randomness that's
|
// Other components interrogate the RandomBeacon to acquire randomness that's
|
||||||
// valid for a specific chain epoch. Also to verify beacon entries that have
|
// valid for a specific chain epoch. Also to verify beacon entries that have
|
||||||
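The new Schedule type is how the node switches random beacons at configured epochs: BeaconForEpoch scans the schedule from the latest entry backwards and returns the last beacon whose Start is at or below the requested epoch, falling back to the first entry. A small sketch of how a two-point schedule resolves; the epoch numbers and the beaconA/beaconB values are made up for illustration, and both are assumed to be RandomBeacon implementations (e.g. DrandBeacon instances).

sched := beacon.Schedule{
	{Start: 0, Beacon: beaconA},
	{Start: 51000, Beacon: beaconB},
}
_ = sched.BeaconForEpoch(100)   // -> beaconA
_ = sched.BeaconForEpoch(51000) // -> beaconB (the comparison is e >= bp.Start)
_ = sched.BeaconForEpoch(60000) // -> beaconB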
@ -25,11 +42,30 @@ type Response struct {
 type RandomBeacon interface {
 	Entry(context.Context, uint64) <-chan Response
 	VerifyEntry(types.BeaconEntry, types.BeaconEntry) error
-	MaxBeaconRoundForEpoch(abi.ChainEpoch, types.BeaconEntry) uint64
+	MaxBeaconRoundForEpoch(abi.ChainEpoch) uint64
 }

-func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.BeaconEntry) error {
-	maxRound := b.MaxBeaconRoundForEpoch(h.Height, prevEntry)
+func ValidateBlockValues(bSchedule Schedule, h *types.BlockHeader, parentEpoch abi.ChainEpoch,
+	prevEntry types.BeaconEntry) error {
+	{
+		parentBeacon := bSchedule.BeaconForEpoch(parentEpoch)
+		currBeacon := bSchedule.BeaconForEpoch(h.Height)
+		if parentBeacon != currBeacon {
+			if len(h.BeaconEntries) != 2 {
+				return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries))
+			}
+			err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0])
+			if err != nil {
+				return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w",
+					h.BeaconEntries[1], h.BeaconEntries[0], err)
+			}
+			return nil
+		}
+	}
+
+	// TODO: fork logic
+	b := bSchedule.BeaconForEpoch(h.Height)
+	maxRound := b.MaxBeaconRoundForEpoch(h.Height)
 	if maxRound == prevEntry.Round {
 		if len(h.BeaconEntries) != 0 {
 			return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries))
@ -56,10 +92,35 @@ func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.B
 	return nil
 }

-func BeaconEntriesForBlock(ctx context.Context, beacon RandomBeacon, round abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) {
+func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) {
+	{
+		parentBeacon := bSchedule.BeaconForEpoch(parentEpoch)
+		currBeacon := bSchedule.BeaconForEpoch(epoch)
+		if parentBeacon != currBeacon {
+			// Fork logic
+			round := currBeacon.MaxBeaconRoundForEpoch(epoch)
+			out := make([]types.BeaconEntry, 2)
+			rch := currBeacon.Entry(ctx, round-1)
+			res := <-rch
+			if res.Err != nil {
+				return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err)
+			}
+			out[0] = res.Entry
+			rch = currBeacon.Entry(ctx, round)
+			res = <-rch
+			if res.Err != nil {
+				return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err)
+			}
+			out[1] = res.Entry
+			return out, nil
+		}
+	}
+
+	beacon := bSchedule.BeaconForEpoch(epoch)
+
 	start := build.Clock.Now()

-	maxRound := beacon.MaxBeaconRoundForEpoch(round, prev)
+	maxRound := beacon.MaxBeaconRoundForEpoch(epoch)
 	if maxRound == prev.Round {
 		return nil, nil
 	}
@ -82,7 +143,7 @@ func BeaconEntriesForBlock(ctx context.Context, beacon RandomBeacon, round abi.C
 			out = append(out, resp.Entry)
 			cur = resp.Entry.Round - 1
 		case <-ctx.Done():
-			return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for round %d: %w", round, ctx.Err())
+			return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for epoch %d: %w", epoch, ctx.Err())
 		}
 	}

|
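The Schedule added above is an ordered list of (start epoch, beacon) pairs, and BeaconForEpoch walks it from the end to find the beacon whose Start is at or below the requested epoch. A minimal sketch of that lookup, assuming two arbitrary RandomBeacon implementations and an illustrative switchover epoch of 1000 (neither value comes from this change):

package beaconexample

import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/chain/beacon"
)

// pickBeacon resolves the active beacon for an epoch via the new Schedule type.
// oldB and newB can be any RandomBeacon implementations; epoch 1000 is an
// assumed, illustrative fork point, not a value from this commit.
func pickBeacon(oldB, newB beacon.RandomBeacon, e abi.ChainEpoch) beacon.RandomBeacon {
    schedule := beacon.Schedule{
        {Start: 0, Beacon: oldB},    // used for epochs 0..999
        {Start: 1000, Beacon: newB}, // used from epoch 1000 onwards
    }
    return schedule.BeaconForEpoch(e)
}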
@ -19,7 +19,7 @@ import (
     logging "github.com/ipfs/go-log"
     pubsub "github.com/libp2p/go-libp2p-pubsub"

-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/go-state-types/abi"

     "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/beacon"
@ -187,7 +187,7 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
     return err
 }

-func (db *DrandBeacon) MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 {
+func (db *DrandBeacon) MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch) uint64 {
     // TODO: sometimes the genesis time for filecoin is zero and this goes negative
     latestTs := ((uint64(filEpoch) * db.filRoundTime) + db.filGenTime) - db.filRoundTime
     dround := (latestTs - db.drandGenTime) / uint64(db.interval.Seconds())
@ -12,7 +12,7 @@ import (
 )

 func TestPrintGroupInfo(t *testing.T) {
-    server := build.DrandConfig().Servers[0]
+    server := build.DrandConfigs[build.DrandIncentinet].Servers[0]
     c, err := hclient.New(server, nil, nil)
     assert.NoError(t, err)
     cg := c.(interface {
@ -6,8 +6,8 @@ import (
     "encoding/binary"
     "time"

+    "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/specs-actors/actors/abi"
     "github.com/minio/blake2b-simd"
     "golang.org/x/xerrors"
 )
@ -53,11 +53,7 @@ func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry)
     return nil
 }

-func (mb *mockBeacon) IsEntryForEpoch(e types.BeaconEntry, epoch abi.ChainEpoch, nulls int) (bool, error) {
-    return int64(e.Round) <= int64(epoch) && int64(epoch)-int64(nulls) >= int64(e.Round), nil
-}
-
-func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 {
+func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch) uint64 {
     return uint64(epoch)
 }
chain/checkpoint.go (new file, 81 lines)
@ -0,0 +1,81 @@
+package chain
+
+import (
+    "encoding/json"
+
+    "github.com/filecoin-project/lotus/chain/types"
+
+    "github.com/filecoin-project/lotus/node/modules/dtypes"
+    "github.com/ipfs/go-datastore"
+    "golang.org/x/xerrors"
+)
+
+var CheckpointKey = datastore.NewKey("/chain/checks")
+
+func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) {
+    haveChks, err := ds.Has(CheckpointKey)
+    if err != nil {
+        return types.EmptyTSK, err
+    }
+
+    if !haveChks {
+        return types.EmptyTSK, nil
+    }
+
+    tskBytes, err := ds.Get(CheckpointKey)
+    if err != nil {
+        return types.EmptyTSK, err
+    }
+
+    var tsk types.TipSetKey
+    err = json.Unmarshal(tskBytes, &tsk)
+    if err != nil {
+        return types.EmptyTSK, err
+    }
+
+    return tsk, err
+}
+
+func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error {
+    if tsk == types.EmptyTSK {
+        return xerrors.Errorf("called with empty tsk")
+    }
+
+    syncer.checkptLk.Lock()
+    defer syncer.checkptLk.Unlock()
+
+    ts, err := syncer.ChainStore().LoadTipSet(tsk)
+    if err != nil {
+        return xerrors.Errorf("cannot find tipset: %w", err)
+    }
+
+    hts := syncer.ChainStore().GetHeaviestTipSet()
+    anc, err := syncer.ChainStore().IsAncestorOf(ts, hts)
+    if err != nil {
+        return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
+    }
+
+    if !hts.Equals(ts) && !anc {
+        return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
+    }
+
+    tskBytes, err := json.Marshal(tsk)
+    if err != nil {
+        return err
+    }
+
+    err = syncer.ds.Put(CheckpointKey, tskBytes)
+    if err != nil {
+        return err
+    }
+
+    syncer.checkpt = tsk
+
+    return nil
+}
+
+func (syncer *Syncer) GetCheckpoint() types.TipSetKey {
+    syncer.checkptLk.Lock()
+    defer syncer.checkptLk.Unlock()
+    return syncer.checkpt
+}
@ -5,7 +5,7 @@ import (
     "sync"
     "time"

-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/go-state-types/abi"
     "github.com/ipfs/go-cid"
     logging "github.com/ipfs/go-log/v2"
     "golang.org/x/xerrors"
@ -35,6 +35,7 @@ type eventAPI interface {
     ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
     ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
     ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+    ChainHead(context.Context) (*types.TipSet, error)
     StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
     ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)

@ -57,7 +58,7 @@ type Events struct {
 func NewEvents(ctx context.Context, api eventAPI) *Events {
     gcConfidence := 2 * build.ForkLengthThreshold

-    tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight)
+    tsc := newTSCache(gcConfidence, api)

     e := &Events{
         api: api,
@ -5,7 +5,7 @@ import (
     "math"
     "sync"

-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/go-state-types/abi"
     "github.com/ipfs/go-cid"
     "golang.org/x/xerrors"

@ -307,7 +307,10 @@ func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHa
     defer e.lk.Unlock()

     // Check if the event has already occurred
-    ts := e.tsc.best()
+    ts, err := e.tsc.best()
+    if err != nil {
+        return 0, xerrors.Errorf("error getting best tipset: %w", err)
+    }
     done, more, err := check(ts)
     if err != nil {
         return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err)
@ -4,8 +4,9 @@ import (
     "context"
     "sync"

-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/go-state-types/abi"
     "go.opencensus.io/trace"
+    "golang.org/x/xerrors"

     "github.com/filecoin-project/lotus/chain/types"
 )
@ -26,7 +27,6 @@ type heightEvents struct {
 }

 func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
-
     ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
     defer span.End()
     span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
@ -144,16 +144,19 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
 }

 // ChainAt invokes the specified `HeightHandler` when the chain reaches the
 // specified height+confidence threshold. If the chain is rolled-back under the
 // specified height, `RevertHandler` will be called.
 //
 // ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null
 func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {

     e.lk.Lock() // Tricky locking, check your locks if you modify this function!

-    bestH := e.tsc.best().Height()
+    best, err := e.tsc.best()
+    if err != nil {
+        return xerrors.Errorf("error getting best tipset: %w", err)
+    }
+
+    bestH := best.Height()
     if bestH >= h+abi.ChainEpoch(confidence) {
         ts, err := e.tsc.getNonNull(h)
         if err != nil {
@ -172,7 +175,11 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence
         }

         e.lk.Lock()
-        bestH = e.tsc.best().Height()
+        best, err = e.tsc.best()
+        if err != nil {
+            return xerrors.Errorf("error getting best tipset: %w", err)
+        }
+        bestH = best.Height()
     }

     defer e.lk.Unlock()
@ -11,8 +11,8 @@ import (
     "github.com/stretchr/testify/require"

     "github.com/filecoin-project/go-address"
-    "github.com/filecoin-project/specs-actors/actors/abi"
-    "github.com/filecoin-project/specs-actors/actors/crypto"
+    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-state-types/crypto"

     "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/build"
@ -46,6 +46,10 @@ type fakeCS struct {
     sub func(rev, app []*types.TipSet)
 }

+func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) {
+    panic("implement me")
+}
+
 func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
     return fcs.tipsets[key], nil
 }
@ -110,7 +114,11 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg

 func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) {
     out := make(chan []*api.HeadChange, 1)
-    out <- []*api.HeadChange{{Type: store.HCCurrent, Val: fcs.tsc.best()}}
+    best, err := fcs.tsc.best()
+    if err != nil {
+        return nil, err
+    }
+    out <- []*api.HeadChange{{Type: store.HCCurrent, Val: best}}

     fcs.sub = func(rev, app []*types.TipSet) {
         notif := make([]*api.HeadChange, len(rev)+len(app))
@ -174,7 +182,8 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { /

     var revs []*types.TipSet
     for i := 0; i < rev; i++ {
-        ts := fcs.tsc.best()
+        ts, err := fcs.tsc.best()
+        require.NoError(fcs.t, err)

         if _, ok := nullm[int(ts.Height())]; !ok {
             revs = append(revs, ts)
@ -196,7 +205,9 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { /
             continue
         }

-        ts := fcs.makeTs(fcs.t, fcs.tsc.best().Key().Cids(), fcs.h, mc)
+        best, err := fcs.tsc.best()
+        require.NoError(fcs.t, err)
+        ts := fcs.makeTs(fcs.t, best.Key().Cids(), fcs.h, mc)
         require.NoError(fcs.t, fcs.tsc.add(ts))

         if hasMsgs {
@ -3,6 +3,7 @@ package state
 import (
     "bytes"

+    "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/specs-actors/actors/util/adt"
     typegen "github.com/whyrusleeping/cbor-gen"
 )
@ -69,7 +70,7 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
 // Modify should be called when a value is modified in the map
 // Remove should be called when a value is removed from the map
 type AdtMapDiff interface {
-    AsKey(key string) (adt.Keyer, error)
+    AsKey(key string) (abi.Keyer, error)
     Add(key string, val *typegen.Deferred) error
     Modify(key string, from, to *typegen.Deferred) error
     Remove(key string, val *typegen.Deferred) error
@ -11,6 +11,7 @@ import (
     cbornode "github.com/ipfs/go-ipld-cbor"
     typegen "github.com/whyrusleeping/cbor-gen"

+    "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/specs-actors/actors/runtime"
     "github.com/filecoin-project/specs-actors/actors/util/adt"

@ -78,21 +79,21 @@ func TestDiffAdtMap(t *testing.T) {
     mapA := adt.MakeEmptyMap(ctxstoreA)
     mapB := adt.MakeEmptyMap(ctxstoreB)

-    require.NoError(t, mapA.Put(adt.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
+    require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete

-    require.NoError(t, mapA.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
-    require.NoError(t, mapB.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{1})))
+    require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
+    require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1})))

-    require.NoError(t, mapA.Put(adt.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
+    require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete

-    require.NoError(t, mapA.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
-    require.NoError(t, mapB.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0})))
+    require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
+    require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0})))

-    require.NoError(t, mapA.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
-    require.NoError(t, mapB.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{6})))
+    require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
+    require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6})))

-    require.NoError(t, mapB.Put(adt.UIntKey(5), runtime.CBORBytes{8})) // add
-    require.NoError(t, mapB.Put(adt.UIntKey(6), runtime.CBORBytes{9})) // add
+    require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add
+    require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add

     changes := new(TestDiffMap)

@ -134,12 +135,12 @@ type TestDiffMap struct {

 var _ AdtMapDiff = &TestDiffMap{}

-func (t *TestDiffMap) AsKey(key string) (adt.Keyer, error) {
-    k, err := adt.ParseUIntKey(key)
+func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) {
+    k, err := abi.ParseUIntKey(key)
     if err != nil {
         return nil, err
     }
-    return adt.UIntKey(k), nil
+    return abi.UIntKey(k), nil
 }

 func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
@ -148,7 +149,7 @@ func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
     if err != nil {
         return err
     }
-    k, err := adt.ParseUIntKey(key)
+    k, err := abi.ParseUIntKey(key)
     if err != nil {
         return err
     }
@ -172,7 +173,7 @@ func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error {
         return err
     }

-    k, err := adt.ParseUIntKey(key)
+    k, err := abi.ParseUIntKey(key)
     if err != nil {
         return err
     }
@ -198,7 +199,7 @@ func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error {
     if err != nil {
         return err
     }
-    k, err := adt.ParseUIntKey(key)
+    k, err := abi.ParseUIntKey(key)
     if err != nil {
         return err
     }
@ -5,8 +5,8 @@ import (
     "context"

     "github.com/filecoin-project/go-address"
-    "github.com/filecoin-project/specs-actors/actors/abi"
-    "github.com/filecoin-project/specs-actors/actors/abi/big"
+    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-state-types/big"
     "github.com/filecoin-project/specs-actors/actors/builtin"
     init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
     "github.com/filecoin-project/specs-actors/actors/builtin/market"
@ -537,8 +537,8 @@ type MinerPreCommitChanges struct {
     Removed []miner.SectorPreCommitOnChainInfo
 }

-func (m *MinerPreCommitChanges) AsKey(key string) (adt.Keyer, error) {
-    sector, err := adt.ParseUIntKey(key)
+func (m *MinerPreCommitChanges) AsKey(key string) (abi.Keyer, error) {
+    sector, err := abi.ParseUIntKey(key)
     if err != nil {
         return nil, err
     }
@ -662,12 +662,12 @@ type AddressChange struct {

 type DiffInitActorStateFunc func(ctx context.Context, oldState *init_.State, newState *init_.State) (changed bool, user UserData, err error)

-func (i *InitActorAddressChanges) AsKey(key string) (adt.Keyer, error) {
+func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
     addr, err := address.NewFromBytes([]byte(key))
     if err != nil {
         return nil, err
     }
-    return adt.AddrKey(addr), nil
+    return abi.AddrKey(addr), nil
 }

 func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {

@ -13,11 +13,11 @@ import (
     cbornode "github.com/ipfs/go-ipld-cbor"

     "github.com/filecoin-project/go-address"
-    "github.com/filecoin-project/specs-actors/actors/abi"
-    "github.com/filecoin-project/specs-actors/actors/abi/big"
+    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-state-types/big"
+    "github.com/filecoin-project/go-state-types/crypto"
     "github.com/filecoin-project/specs-actors/actors/builtin/market"
     "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-    "github.com/filecoin-project/specs-actors/actors/crypto"
     "github.com/filecoin-project/specs-actors/actors/util/adt"
     tutils "github.com/filecoin-project/specs-actors/support/testing"
@ -3,13 +3,16 @@ package events
 import (
     "context"

-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/go-state-types/abi"
     "golang.org/x/xerrors"

     "github.com/filecoin-project/lotus/chain/types"
 )

-type tsByHFunc func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+type tsCacheAPI interface {
+    ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+    ChainHead(context.Context) (*types.TipSet, error)
+}

 // tipSetCache implements a simple ring-buffer cache to keep track of recent
 // tipsets
@ -18,10 +21,10 @@ type tipSetCache struct {
     start int
     len   int

-    storage tsByHFunc
+    storage tsCacheAPI
 }

-func newTSCache(cap abi.ChainEpoch, storage tsByHFunc) *tipSetCache {
+func newTSCache(cap abi.ChainEpoch, storage tsCacheAPI) *tipSetCache {
     return &tipSetCache{
         cache: make([]*types.TipSet, cap),
         start: 0,
@ -94,7 +97,7 @@ func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error)
 func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {
     if tsc.len == 0 {
         log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
-        return tsc.storage(context.TODO(), height, types.EmptyTSK)
+        return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, types.EmptyTSK)
     }

     headH := tsc.cache[tsc.start].Height()
@ -114,14 +117,18 @@ func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {

     if height < tail.Height() {
         log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height())
-        return tsc.storage(context.TODO(), height, tail.Key())
+        return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, tail.Key())
     }

     return tsc.cache[normalModulo(tsc.start-int(headH-height), clen)], nil
 }

-func (tsc *tipSetCache) best() *types.TipSet {
-    return tsc.cache[tsc.start]
+func (tsc *tipSetCache) best() (*types.TipSet, error) {
+    best := tsc.cache[tsc.start]
+    if best == nil {
+        return tsc.storage.ChainHead(context.TODO())
+    }
+    return best, nil
 }

 func normalModulo(n, m int) int {
@ -4,8 +4,8 @@ import (
     "context"
     "testing"

-    "github.com/filecoin-project/specs-actors/actors/abi"
-    "github.com/filecoin-project/specs-actors/actors/crypto"
+    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-state-types/crypto"
     "github.com/stretchr/testify/require"

     "github.com/filecoin-project/go-address"
@ -13,10 +13,7 @@ import (
 )

 func TestTsCache(t *testing.T) {
-    tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
-        t.Fatal("storage call")
-        return &types.TipSet{}, nil
-    })
+    tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})

     h := abi.ChainEpoch(75)

@ -43,7 +40,12 @@ func TestTsCache(t *testing.T) {

     for i := 0; i < 9000; i++ {
         if i%90 > 60 {
-            if err := tsc.revert(tsc.best()); err != nil {
+            best, err := tsc.best()
+            if err != nil {
+                t.Fatal(err, "; i:", i)
+                return
+            }
+            if err := tsc.revert(best); err != nil {
                 t.Fatal(err, "; i:", i)
                 return
             }
@ -55,11 +57,21 @@ func TestTsCache(t *testing.T) {

 }

+type tsCacheAPIFailOnStorageCall struct {
+    t *testing.T
+}
+
+func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
+    tc.t.Fatal("storage call")
+    return &types.TipSet{}, nil
+}
+func (tc *tsCacheAPIFailOnStorageCall) ChainHead(ctx context.Context) (*types.TipSet, error) {
+    tc.t.Fatal("storage call")
+    return &types.TipSet{}, nil
+}
+
 func TestTsCacheNulls(t *testing.T) {
-    tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
-        t.Fatal("storage call")
-        return &types.TipSet{}, nil
-    })
+    tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})

     h := abi.ChainEpoch(75)

@ -91,7 +103,9 @@ func TestTsCacheNulls(t *testing.T) {
     add()
     add()

-    require.Equal(t, h-1, tsc.best().Height())
+    best, err := tsc.best()
+    require.NoError(t, err)
+    require.Equal(t, h-1, best.Height())

     ts, err := tsc.get(h - 1)
     require.NoError(t, err)
@ -109,9 +123,17 @@ func TestTsCacheNulls(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, h-8, ts.Height())

-    require.NoError(t, tsc.revert(tsc.best()))
-    require.NoError(t, tsc.revert(tsc.best()))
-    require.Equal(t, h-8, tsc.best().Height())
+    best, err = tsc.best()
+    require.NoError(t, err)
+    require.NoError(t, tsc.revert(best))
+
+    best, err = tsc.best()
+    require.NoError(t, err)
+    require.NoError(t, tsc.revert(best))
+
+    best, err = tsc.best()
+    require.NoError(t, err)
+    require.Equal(t, h-8, best.Height())

     h += 50
     add()
@ -120,3 +142,27 @@ func TestTsCacheNulls(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, h-1, ts.Height())
 }
+
+type tsCacheAPIStorageCallCounter struct {
+    t                      *testing.T
+    chainGetTipSetByHeight int
+    chainHead              int
+}
+
+func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
+    tc.chainGetTipSetByHeight++
+    return &types.TipSet{}, nil
+}
+func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) {
+    tc.chainHead++
+    return &types.TipSet{}, nil
+}
+
+func TestTsCacheEmpty(t *testing.T) {
+    // Calling best on an empty cache should just call out to the chain API
+    callCounter := &tsCacheAPIStorageCallCounter{t: t}
+    tsc := newTSCache(50, callCounter)
+    _, err := tsc.best()
+    require.NoError(t, err)
+    require.Equal(t, 1, callCounter.chainHead)
+}
@ -1,6 +1,6 @@
 // Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.

-package blocksync
+package exchange

 import (
     "fmt"

@ -1,4 +1,4 @@
-package blocksync
+package exchange

 import (
     "bufio"
@ -7,13 +7,16 @@ import (
     "math/rand"
     "time"

-    host "github.com/libp2p/go-libp2p-core/host"
-    inet "github.com/libp2p/go-libp2p-core/network"
+    "github.com/libp2p/go-libp2p-core/host"
+    "github.com/libp2p/go-libp2p-core/network"
     "github.com/libp2p/go-libp2p-core/peer"

     "go.opencensus.io/trace"
+    "go.uber.org/fx"
     "golang.org/x/xerrors"

     cborutil "github.com/filecoin-project/go-cbor-util"

     "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/store"
     "github.com/filecoin-project/lotus/chain/types"
@ -21,11 +24,9 @@ import (
     "github.com/filecoin-project/lotus/lib/peermgr"
 )

-// Protocol client.
-// FIXME: Rename to just `Client`. Not done at the moment to avoid
-// disrupting too much of the consumer code, should be done along
-// https://github.com/filecoin-project/lotus/issues/2612.
-type BlockSync struct {
+// client implements exchange.Client, using the libp2p ChainExchange protocol
+// as the fetching mechanism.
+type client struct {
     // Connection manager used to contact the server.
     // FIXME: We should have a reduced interface here, initialized
     // just with our protocol ID, we shouldn't be able to open *any*
@ -35,13 +36,14 @@ type BlockSync struct {
     peerTracker *bsPeerTracker
 }

-func NewClient(
-    host host.Host,
-    pmgr peermgr.MaybePeerMgr,
-) *BlockSync {
-    return &BlockSync{
+var _ Client = (*client)(nil)
+
+// NewClient creates a new libp2p-based exchange.Client that uses the libp2p
+// ChainExhange protocol as the fetching mechanism.
+func NewClient(lc fx.Lifecycle, host host.Host, pmgr peermgr.MaybePeerMgr) Client {
+    return &client{
         host: host,
-        peerTracker: newPeerTracker(pmgr.Mgr),
+        peerTracker: newPeerTracker(lc, host, pmgr.Mgr),
     }
 }

@ -62,11 +64,7 @@ func NewClient(
 // request options without disrupting external calls. In the future the
 // consumers should be forced to use a more standardized service and
 // adhere to a single API derived from this function.
-func (client *BlockSync) doRequest(
-    ctx context.Context,
-    req *Request,
-    singlePeer *peer.ID,
-) (*validatedResponse, error) {
+func (c *client) doRequest(ctx context.Context, req *Request, singlePeer *peer.ID) (*validatedResponse, error) {
     // Validate request.
     if req.Length == 0 {
         return nil, xerrors.Errorf("invalid request of length 0")
@ -86,7 +84,7 @@ func (client *BlockSync) doRequest(
     if singlePeer != nil {
         peers = []peer.ID{*singlePeer}
     } else {
-        peers = client.getShuffledPeers()
+        peers = c.getShuffledPeers()
         if len(peers) == 0 {
             return nil, xerrors.Errorf("no peers available")
         }
@ -107,9 +105,9 @@ func (client *BlockSync) doRequest(
         }

         // Send request, read response.
-        res, err := client.sendRequestToPeer(ctx, peer, req)
+        res, err := c.sendRequestToPeer(ctx, peer, req)
         if err != nil {
-            if !xerrors.Is(err, inet.ErrNoConn) {
+            if !xerrors.Is(err, network.ErrNoConn) {
                 log.Warnf("could not connect to peer %s: %s",
                     peer.String(), err)
             }
@ -117,15 +115,15 @@ func (client *BlockSync) doRequest(
         }

         // Process and validate response.
-        validRes, err := client.processResponse(req, res)
+        validRes, err := c.processResponse(req, res)
         if err != nil {
             log.Warnf("processing peer %s response failed: %s",
                 peer.String(), err)
             continue
         }

-        client.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime))
-        client.host.ConnManager().TagPeer(peer, "bsync", SUCCESS_PEER_TAG_VALUE)
+        c.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime))
+        c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue)
         return validRes, nil
     }

@ -144,11 +142,8 @@ func (client *BlockSync) doRequest(
 // We are conflating in the single error returned both status and validation
 // errors. Peer penalization should happen here then, before returning, so
 // we can apply the correct penalties depending on the cause of the error.
-func (client *BlockSync) processResponse(
-    req *Request,
-    res *Response,
-    // FIXME: Add the `peer` as argument once we implement penalties.
-) (*validatedResponse, error) {
+// FIXME: Add the `peer` as argument once we implement penalties.
+func (c *client) processResponse(req *Request, res *Response) (*validatedResponse, error) {
     err := res.statusToError()
     if err != nil {
         return nil, xerrors.Errorf("status error: %s", err)
@ -246,16 +241,8 @@ func (client *BlockSync) processResponse(
     return validRes, nil
 }

-// GetBlocks fetches count blocks from the network, from the provided tipset
-// *backwards*, returning as many tipsets as count.
-//
-// {hint/usage}: This is used by the Syncer during normal chain syncing and when
-// resolving forks.
-func (client *BlockSync) GetBlocks(
-    ctx context.Context,
-    tsk types.TipSetKey,
-    count int,
-) ([]*types.TipSet, error) {
+// GetBlocks implements Client.GetBlocks(). Refer to the godocs there.
+func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
     ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
     defer span.End()
     if span.IsRecordingEvents() {
@ -271,7 +258,7 @@ func (client *BlockSync) GetBlocks(
         Options: Headers,
     }

-    validRes, err := client.doRequest(ctx, req, nil)
+    validRes, err := c.doRequest(ctx, req, nil)
     if err != nil {
         return nil, err
     }
@ -279,11 +266,8 @@ func (client *BlockSync) GetBlocks(
     return validRes.tipsets, nil
 }

-func (client *BlockSync) GetFullTipSet(
-    ctx context.Context,
-    peer peer.ID,
-    tsk types.TipSetKey,
-) (*store.FullTipSet, error) {
+// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there.
+func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
     // TODO: round robin through these peers on error

     req := &Request{
@ -292,7 +276,7 @@ func (client *BlockSync) GetFullTipSet(
         Options: Headers | Messages,
     }

-    validRes, err := client.doRequest(ctx, req, &peer)
+    validRes, err := c.doRequest(ctx, req, &peer)
     if err != nil {
         return nil, err
     }
@ -302,11 +286,8 @@ func (client *BlockSync) GetFullTipSet(
     // *one* tipset here, so it's safe to index directly.
 }

-func (client *BlockSync) GetChainMessages(
-    ctx context.Context,
-    head *types.TipSet,
-    length uint64,
-) ([]*CompactedMessages, error) {
+// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there.
+func (c *client) GetChainMessages(ctx context.Context, head *types.TipSet, length uint64) ([]*CompactedMessages, error) {
     ctx, span := trace.StartSpan(ctx, "GetChainMessages")
     if span.IsRecordingEvents() {
         span.AddAttributes(
@ -322,7 +303,7 @@ func (client *BlockSync) GetChainMessages(
         Options: Messages,
     }

-    validRes, err := client.doRequest(ctx, req, nil)
+    validRes, err := c.doRequest(ctx, req, nil)
     if err != nil {
         return nil, err
     }
@ -333,11 +314,7 @@ func (client *BlockSync) GetChainMessages(
 // Send a request to a peer. Write request in the stream and read the
 // response back. We do not do any processing of the request/response
 // here.
-func (client *BlockSync) sendRequestToPeer(
-    ctx context.Context,
-    peer peer.ID,
-    req *Request,
-) (_ *Response, err error) {
+func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Request) (_ *Response, err error) {
     // Trace code.
     ctx, span := trace.StartSpan(ctx, "sendRequestToPeer")
     defer span.End()
@ -358,34 +335,33 @@ func (client *BlockSync) sendRequestToPeer(
     }()
     // -- TRACE --

-    supported, err := client.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID)
+    supported, err := c.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID, ChainExchangeProtocolID)
     if err != nil {
+        c.RemovePeer(peer)
         return nil, xerrors.Errorf("failed to get protocols for peer: %w", err)
     }
-    if len(supported) == 0 || supported[0] != BlockSyncProtocolID {
-        return nil, xerrors.Errorf("peer %s does not support protocol %s",
-            peer, BlockSyncProtocolID)
-        // FIXME: `ProtoBook` should support a *single* protocol check that returns
-        // a bool instead of a list.
+    if len(supported) == 0 || (supported[0] != BlockSyncProtocolID && supported[0] != ChainExchangeProtocolID) {
+        return nil, xerrors.Errorf("peer %s does not support protocols %s",
+            peer, []string{BlockSyncProtocolID, ChainExchangeProtocolID})
     }

     connectionStart := build.Clock.Now()

     // Open stream to peer.
-    stream, err := client.host.NewStream(
-        inet.WithNoDial(ctx, "should already have connection"),
+    stream, err := c.host.NewStream(
+        network.WithNoDial(ctx, "should already have connection"),
         peer,
-        BlockSyncProtocolID)
+        ChainExchangeProtocolID, BlockSyncProtocolID)
     if err != nil {
-        client.RemovePeer(peer)
+        c.RemovePeer(peer)
         return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
     }

     // Write request.
-    _ = stream.SetWriteDeadline(time.Now().Add(WRITE_REQ_DEADLINE))
+    _ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline))
     if err := cborutil.WriteCborRPC(stream, req); err != nil {
         _ = stream.SetWriteDeadline(time.Time{})
-        client.peerTracker.logFailure(peer, build.Clock.Since(connectionStart))
+        c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
         // FIXME: Should we also remove peer here?
         return nil, err
     }
@ -395,11 +371,11 @@ func (client *BlockSync) sendRequestToPeer(
     // Read response.
     var res Response
     err = cborutil.ReadCborRPC(
-        bufio.NewReader(incrt.New(stream, READ_RES_MIN_SPEED, READ_RES_DEADLINE)),
+        bufio.NewReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline)),
         &res)
     if err != nil {
-        client.peerTracker.logFailure(peer, build.Clock.Since(connectionStart))
-        return nil, xerrors.Errorf("failed to read blocksync response: %w", err)
+        c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
+        return nil, xerrors.Errorf("failed to read chainxchg response: %w", err)
     }

     // FIXME: Move all this together at the top using a defer as done elsewhere.
@ -412,32 +388,34 @@ func (client *BlockSync) sendRequestToPeer(
         )
     }

-    client.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart))
+    c.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart), uint64(len(res.Chain)))
     // FIXME: We should really log a success only after we validate the response.
     // It might be a bit hard to do.
     return &res, nil
 }

-func (client *BlockSync) AddPeer(p peer.ID) {
-    client.peerTracker.addPeer(p)
+// AddPeer implements Client.AddPeer(). Refer to the godocs there.
+func (c *client) AddPeer(p peer.ID) {
+    c.peerTracker.addPeer(p)
 }

-func (client *BlockSync) RemovePeer(p peer.ID) {
-    client.peerTracker.removePeer(p)
+// RemovePeer implements Client.RemovePeer(). Refer to the godocs there.
+func (c *client) RemovePeer(p peer.ID) {
+    c.peerTracker.removePeer(p)
 }

 // getShuffledPeers returns a preference-sorted set of peers (by latency
 // and failure counting), shuffling the first few peers so we don't always
 // pick the same peer.
 // FIXME: Consider merging with `shufflePrefix()s`.
-func (client *BlockSync) getShuffledPeers() []peer.ID {
-    peers := client.peerTracker.prefSortedPeers()
+func (c *client) getShuffledPeers() []peer.ID {
+    peers := c.peerTracker.prefSortedPeers()
     shufflePrefix(peers)
     return peers
 }

 func shufflePrefix(peers []peer.ID) {
-    prefix := SHUFFLE_PEERS_PREFIX
+    prefix := ShufflePeersPrefix
     if len(peers) < prefix {
         prefix = len(peers)
     }
chain/exchange/doc.go (new file, 19 lines)
@ -0,0 +1,19 @@
+// Package exchange contains the ChainExchange server and client components.
+//
+// ChainExchange is the basic chain synchronization protocol of Filecoin.
+// ChainExchange is an RPC-oriented protocol, with a single operation to
+// request blocks for now.
+//
+// A request contains a start anchor block (referred to with a CID), and a
+// amount of blocks requested beyond the anchor (including the anchor itself).
+//
+// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
+// two options at the moment:
+//
+// - include block contents
+// - include block messages
+//
+// The response will include a status code, an optional message, and the
+// response payload in case of success. The payload is a slice of serialized
+// tipsets.
+package exchange
Normal file
51
chain/exchange/interfaces.go
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
package exchange
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-libp2p-core/network"
|
||||||
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server is the responder side of the ChainExchange protocol. It accepts
|
||||||
|
// requests from clients and services them by returning the requested
|
||||||
|
// chain data.
|
||||||
|
type Server interface {
|
||||||
|
// HandleStream is the protocol handler to be registered on a libp2p
|
||||||
|
// protocol router.
|
||||||
|
//
|
||||||
|
// In the current version of the protocol, streams are single-use. The
|
||||||
|
// server will read a single Request, and will respond with a single
|
||||||
|
// Response. It will dispose of the stream straight after.
|
||||||
|
HandleStream(stream network.Stream)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client is the requesting side of the ChainExchange protocol. It acts as
|
||||||
|
// a proxy for other components to request chain data from peers. It is chiefly
|
||||||
|
// used by the Syncer.
|
||||||
|
type Client interface {
|
||||||
|
// GetBlocks fetches block headers from the network, from the provided
|
||||||
|
// tipset *backwards*, returning as many tipsets as the count parameter,
|
||||||
|
// or less.
|
||||||
|
GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error)
|
||||||
|
|
||||||
|
// GetChainMessages fetches messages from the network, from the provided
|
||||||
|
// tipset *backwards*, returning the messages from as many tipsets as the
|
||||||
|
// count parameter, or less.
|
||||||
|
GetChainMessages(ctx context.Context, head *types.TipSet, length uint64) ([]*CompactedMessages, error)
|
||||||
|
|
||||||
|
// GetFullTipSet fetches a full tipset from a given peer. If successful,
|
||||||
|
// the fetched object contains block headers and all messages in full form.
|
||||||
|
GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error)
|
||||||
|
|
||||||
|
// AddPeer adds a peer to the pool of peers that the Client requests
|
||||||
|
// data from.
|
||||||
|
AddPeer(peer peer.ID)
|
||||||
|
|
||||||
|
// RemovePeer removes a peer from the pool of peers that the Client
|
||||||
|
// requests data from.
|
||||||
|
RemovePeer(peer peer.ID)
|
||||||
|
}
|
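To make the Client contract above concrete, a short usage sketch from the consumer side, assuming the package import path chain/exchange shown in the new-file headers and a Client obtained from NewClient; the function name fetchParents and the count of 20 are illustrative, not taken from this commit.

package syncexample

import (
    "context"

    "github.com/filecoin-project/lotus/chain/exchange"
    "github.com/filecoin-project/lotus/chain/types"
)

// fetchParents asks the network for up to 20 tipsets, walking backwards from
// the given tipset key, using the exchange.Client interface defined above.
func fetchParents(ctx context.Context, cl exchange.Client, from types.TipSetKey) ([]*types.TipSet, error) {
    return cl.GetBlocks(ctx, from, 20)
}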
@@ -1,13 +1,16 @@
-package blocksync
+package exchange

 // FIXME: This needs to be reviewed.

 import (
+	"context"
 	"sort"
 	"sync"
 	"time"

+	host "github.com/libp2p/go-libp2p-core/host"
 	"github.com/libp2p/go-libp2p-core/peer"
+	"go.uber.org/fx"

 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/lib/peermgr"
@@ -29,11 +32,30 @@ type bsPeerTracker struct {
 	pmgr *peermgr.PeerMgr
 }

-func newPeerTracker(pmgr *peermgr.PeerMgr) *bsPeerTracker {
-	return &bsPeerTracker{
+func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeerTracker {
+	bsPt := &bsPeerTracker{
 		peers: make(map[peer.ID]*peerStats),
 		pmgr:  pmgr,
 	}
+
+	sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer))
+	if err != nil {
+		panic(err)
+	}
+
+	go func() {
+		for newPeer := range sub.Out() {
+			bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id)
+		}
+	}()
+
+	lc.Append(fx.Hook{
+		OnStop: func(ctx context.Context) error {
+			return sub.Close()
+		},
+	})
+
+	return bsPt
 }

 func (bpt *bsPeerTracker) addPeer(p peer.ID) {
@@ -72,16 +94,7 @@ func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID {
 		var costI, costJ float64

 		getPeerInitLat := func(p peer.ID) float64 {
-			var res float64
-			if bpt.pmgr != nil {
-				if lat, ok := bpt.pmgr.GetPeerLatency(p); ok {
-					res = float64(lat)
-				}
-			}
-			if res == 0 {
-				res = float64(bpt.avgGlobalTime)
-			}
-			return res * newPeerMul
+			return float64(bpt.avgGlobalTime) * newPeerMul
 		}

 		if pi.successes+pi.failures > 0 {
@@ -107,8 +120,8 @@ func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID {
 const (
 	// xInvAlpha = (N+1)/2

-	localInvAlpha  = 5  // 86% of the value is the last 9
-	globalInvAlpha = 20 // 86% of the value is the last 39
+	localInvAlpha  = 10 // 86% of the value is the last 19
+	globalInvAlpha = 25 // 86% of the value is the last 49
 )

 func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) {
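The inverse-alpha constants above parameterize an exponential moving average: with xInvAlpha = (N+1)/2, an inverse alpha of 10 corresponds to N = 19, so roughly 86% of the average's weight comes from the last 19 samples, and 25 maps to the last 49. A minimal sketch of that update rule; the helper name is an illustrative assumption, not code from this change:

// ewma folds one sample into a running average with alpha = 1/invAlpha.
// invAlpha = 10 puts ~86% of the weight on the last 19 samples; invAlpha = 25
// on the last 49. Callers such as logSuccess/logFailure below would first
// normalize the duration per requested item (dur / reqSize) before feeding it in.
func ewma(current, sample time.Duration, invAlpha int64) time.Duration {
	if current == 0 {
		return sample
	}
	return time.Duration((int64(current)*(invAlpha-1) + int64(sample)) / invAlpha)
}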
@@ -133,7 +146,7 @@ func logTime(pi *peerStats, dur time.Duration) {

 }

-func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) {
+func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration, reqSize uint64) {
 	bpt.lk.Lock()
 	defer bpt.lk.Unlock()

@@ -145,10 +158,13 @@ func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) {
 	}

 	pi.successes++
-	logTime(pi, dur)
+	if reqSize == 0 {
+		reqSize = 1
+	}
+	logTime(pi, dur/time.Duration(reqSize))
 }

-func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) {
+func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration, reqSize uint64) {
 	bpt.lk.Lock()
 	defer bpt.lk.Unlock()

@@ -160,7 +176,10 @@ func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) {
 	}

 	pi.failures++
-	logTime(pi, dur)
+	if reqSize == 0 {
+		reqSize = 1
+	}
+	logTime(pi, dur/time.Duration(reqSize))
 }

 func (bpt *bsPeerTracker) removePeer(p peer.ID) {
@@ -1,4 +1,4 @@
-package blocksync
+package exchange

 import (
 	"time"
@@ -13,9 +13,17 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )

-var log = logging.Logger("blocksync")
+var log = logging.Logger("chainxchg")

-const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
+const (
+	// BlockSyncProtocolID is the protocol ID of the former blocksync protocol.
+	// Deprecated.
+	BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
+
+	// ChainExchangeProtocolID is the protocol ID of the chain exchange
+	// protocol.
+	ChainExchangeProtocolID = "/fil/chain/xchg/0.0.1"
+)

 // FIXME: Bumped from original 800 to this to accommodate `syncFork()`
 // use of `GetBlocks()`. It seems the expectation of that API is to
@@ -25,14 +33,16 @@ const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
 // qualifier to avoid "const initializer [...] is not a constant" error.)
 var MaxRequestLength = uint64(build.ForkLengthThreshold)

-// Extracted constants from the code.
-// FIXME: Should be reviewed and confirmed.
-const SUCCESS_PEER_TAG_VALUE = 25
-const WRITE_REQ_DEADLINE = 5 * time.Second
-const READ_RES_DEADLINE = WRITE_REQ_DEADLINE
-const READ_RES_MIN_SPEED = 50 << 10
-const SHUFFLE_PEERS_PREFIX = 5
-const WRITE_RES_DEADLINE = 60 * time.Second
+const (
+	// Extracted constants from the code.
+	// FIXME: Should be reviewed and confirmed.
+	SuccessPeerTagValue = 25
+	WriteReqDeadline    = 5 * time.Second
+	ReadResDeadline     = WriteReqDeadline
+	ReadResMinSpeed     = 50 << 10
+	ShufflePeersPrefix  = 5
+	WriteResDeadline    = 60 * time.Second
+)

 // FIXME: Rename. Make private.
 type Request struct {
@@ -117,7 +127,7 @@ func (res *Response) statusToError() error {
 	case NotFound:
 		return xerrors.Errorf("not found")
 	case GoAway:
-		return xerrors.Errorf("not handling 'go away' blocksync responses yet")
+		return xerrors.Errorf("not handling 'go away' chainxchg responses yet")
 	case InternalError:
 		return xerrors.Errorf("block sync peer errored: %s", res.ErrorMessage)
 	case BadRequest:
@@ -1,4 +1,4 @@
-package blocksync
+package exchange

 import (
 	"bufio"
@@ -18,38 +18,25 @@ import (
 	inet "github.com/libp2p/go-libp2p-core/network"
 )

-// BlockSyncService is the component that services BlockSync requests from
-// peers.
-//
-// BlockSync is the basic chain synchronization protocol of Filecoin. BlockSync
-// is an RPC-oriented protocol, with a single operation to request blocks.
-//
-// A request contains a start anchor block (referred to with a CID), and a
-// amount of blocks requested beyond the anchor (including the anchor itself).
-//
-// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
-// two options at the moment:
-//
-// - include block contents
-// - include block messages
-//
-// The response will include a status code, an optional message, and the
-// response payload in case of success. The payload is a slice of serialized
-// tipsets.
-// FIXME: Rename to just `Server` (will be done later, see note on `BlockSync`).
-type BlockSyncService struct {
+// server implements exchange.Server. It services requests for the
+// libp2p ChainExchange protocol.
+type server struct {
 	cs *store.ChainStore
 }

-func NewBlockSyncService(cs *store.ChainStore) *BlockSyncService {
-	return &BlockSyncService{
+var _ Server = (*server)(nil)
+
+// NewServer creates a new libp2p-based exchange.Server. It services requests
+// for the libp2p ChainExchange protocol.
+func NewServer(cs *store.ChainStore) Server {
+	return &server{
 		cs: cs,
 	}
 }

-// Entry point of the service, handles `Request`s.
-func (server *BlockSyncService) HandleStream(stream inet.Stream) {
-	ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream")
+// HandleStream implements Server.HandleStream. Refer to the godocs there.
+func (s *server) HandleStream(stream inet.Stream) {
+	ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream")
 	defer span.End()

 	defer stream.Close() //nolint:errcheck
@@ -62,13 +49,13 @@ func (server *BlockSyncService) HandleStream(stream inet.Stream) {
 	log.Infow("block sync request",
 		"start", req.Head, "len", req.Length)

-	resp, err := server.processRequest(ctx, &req)
+	resp, err := s.processRequest(ctx, &req)
 	if err != nil {
 		log.Warn("failed to process request: ", err)
 		return
 	}

-	_ = stream.SetDeadline(time.Now().Add(WRITE_RES_DEADLINE))
+	_ = stream.SetDeadline(time.Now().Add(WriteResDeadline))
 	if err := cborutil.WriteCborRPC(stream, resp); err != nil {
 		_ = stream.SetDeadline(time.Time{})
 		log.Warnw("failed to write back response for handle stream",
@@ -80,10 +67,7 @@ func (server *BlockSyncService) HandleStream(stream inet.Stream) {

 // Validate and service the request. We return either a protocol
 // response or an internal error.
-func (server *BlockSyncService) processRequest(
-	ctx context.Context,
-	req *Request,
-) (*Response, error) {
+func (s *server) processRequest(ctx context.Context, req *Request) (*Response, error) {
 	validReq, errResponse := validateRequest(ctx, req)
 	if errResponse != nil {
 		// The request did not pass validation, return the response
@@ -91,17 +75,14 @@ func (server *BlockSyncService) processRequest(
 		return errResponse, nil
 	}

-	return server.serviceRequest(ctx, validReq)
+	return s.serviceRequest(ctx, validReq)
 }

 // Validate request. We either return a `validatedRequest`, or an error
 // `Response` indicating why we can't process it. We do not return any
 // internal errors here, we just signal protocol ones.
-func validateRequest(
-	ctx context.Context,
-	req *Request,
-) (*validatedRequest, *Response) {
-	_, span := trace.StartSpan(ctx, "blocksync.ValidateRequest")
+func validateRequest(ctx context.Context, req *Request) (*validatedRequest, *Response) {
+	_, span := trace.StartSpan(ctx, "chainxchg.ValidateRequest")
 	defer span.End()

 	validReq := validatedRequest{}
@@ -147,14 +128,11 @@ func validateRequest(
 	return &validReq, nil
 }

-func (server *BlockSyncService) serviceRequest(
-	ctx context.Context,
-	req *validatedRequest,
-) (*Response, error) {
-	_, span := trace.StartSpan(ctx, "blocksync.ServiceRequest")
+func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Response, error) {
+	_, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest")
 	defer span.End()

-	chain, err := collectChainSegment(server.cs, req)
+	chain, err := collectChainSegment(s.cs, req)
 	if err != nil {
 		log.Warn("block sync request: collectChainSegment failed: ", err)
 		return &Response{
@@ -174,10 +152,7 @@ func (server *BlockSyncService) serviceRequest(
 	}, nil
 }

-func collectChainSegment(
-	cs *store.ChainStore,
-	req *validatedRequest,
-) ([]*BSTipSet, error) {
+func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) {
 	var bstips []*BSTipSet

 	cur := req.head
@@ -8,11 +8,13 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/crypto"
 	saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 	block "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-blockservice"
 	"github.com/ipfs/go-cid"
@@ -46,7 +48,7 @@ const msgsPerBlock = 20
 //nolint:deadcode,varcheck
 var log = logging.Logger("gen")

-var ValidWpostForTesting = []abi.PoStProof{{
+var ValidWpostForTesting = []proof.PoStProof{{
 	ProofBytes: []byte("valid proof"),
 }}

@@ -57,7 +59,7 @@ type ChainGen struct {

 	cs *store.ChainStore

-	beacon beacon.RandomBeacon
+	beacon beacon.Schedule

 	sm *stmgr.StateManager

@@ -250,7 +252,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {

 	miners := []address.Address{maddr1, maddr2}

-	beac := beacon.NewMockBeacon(time.Second)
+	beac := beacon.Schedule{{Start: 0, Beacon: beacon.NewMockBeacon(time.Second)}}
 	//beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs)
 	//if err != nil {
 	//return nil, xerrors.Errorf("creating drand beacon: %w", err)
@@ -336,7 +338,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add

 	prev := mbi.PrevBeaconEntry

-	entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, prev)
+	entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, pts.Height(), prev)
 	if err != nil {
 		return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err)
 	}
@@ -356,7 +358,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
 		return nil, nil, nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
 	}

-	if len(entries) == 0 {
+	if round > build.UpgradeSmokeHeight {
 		buf.Write(pts.MinTicket().VRFProof)
 	}

@@ -457,7 +459,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners

 func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
 	eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch,
-	wpost []abi.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
+	wpost []proof.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {

 	var ts uint64
 	if cg.Timestamper != nil {
@@ -557,7 +559,7 @@ type mca struct {
 	w  *wallet.Wallet
 	sm *stmgr.StateManager
 	pv ffiwrapper.Verifier
-	bcn beacon.RandomBeacon
+	bcn beacon.Schedule
 }

 func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@@ -588,7 +590,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr

 type WinningPoStProver interface {
 	GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
-	ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error)
+	ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error)
 }

 type wppProvider struct{}
@@ -597,7 +599,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
 	return []uint64{0}, nil
 }

-func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
+func (wpp *wppProvider) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
 	return ValidWpostForTesting, nil
 }

@@ -664,15 +666,15 @@ type genFakeVerifier struct{}

 var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil)

-func (m genFakeVerifier) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
 	return true, nil
 }

-func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
 	panic("not supported")
 }

-func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
 	panic("not supported")
 }

@@ -3,8 +3,8 @@ package gen
 import (
 	"testing"

-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
@@ -6,6 +6,8 @@ import (
 	"encoding/json"
 	"fmt"

+	"github.com/filecoin-project/go-state-types/network"
+
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
 	cbor "github.com/ipfs/go-ipld-cbor"
@@ -14,13 +16,13 @@ import (

 	"github.com/filecoin-project/go-address"

-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/account"
 	"github.com/filecoin-project/specs-actors/actors/builtin/multisig"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"

 	"github.com/filecoin-project/lotus/build"
@@ -404,6 +406,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
 	verifNeeds := make(map[address.Address]abi.PaddedPieceSize)
 	var sum abi.PaddedPieceSize

+	nwv := func(context.Context, abi.ChainEpoch) network.Version {
+		return build.NewestNetworkVersion
+	}
+
 	vmopt := vm.VMOpts{
 		StateBase: stateroot,
 		Epoch:     0,
@@ -411,6 +417,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
 		Bstore:         cs.Blockstore(),
 		Syscalls:       mkFakedSigSyscalls(cs.VMSys()),
 		CircSupplyCalc: nil,
+		NtwkVersion:    nwv,
 		BaseFee:        types.NewInt(0),
 	}
 	vm, err := vm.NewVM(&vmopt)
@@ -6,6 +6,10 @@ import (
 	"fmt"
 	"math/rand"

+	"github.com/filecoin-project/lotus/build"
+
+	"github.com/filecoin-project/go-state-types/network"
+
 	"github.com/filecoin-project/lotus/chain/state"

 	"github.com/ipfs/go-cid"
@@ -14,15 +18,15 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/market"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 	"github.com/filecoin-project/specs-actors/actors/builtin/reward"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-actors/actors/runtime"

 	"github.com/filecoin-project/lotus/chain/store"
@@ -61,6 +65,10 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
 		return big.Zero(), nil
 	}

+	nwv := func(context.Context, abi.ChainEpoch) network.Version {
+		return build.NewestNetworkVersion
+	}
+
 	vmopt := &vm.VMOpts{
 		StateBase: sroot,
 		Epoch:     0,
@@ -68,6 +76,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
 		Bstore:         cs.Blockstore(),
 		Syscalls:       mkFakedSigSyscalls(cs.VMSys()),
 		CircSupplyCalc: csc,
+		NtwkVersion:    nwv,
 		BaseFee:        types.NewInt(0),
 	}

@@ -6,6 +6,7 @@ import (
 	"fmt"

 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"

 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -50,7 +51,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 		fmt.Printf("init set %s t0%d\n", e, counter)

 		value := cbg.CborInt(counter)
-		if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(e), &value); err != nil {
 			return 0, nil, nil, err
 		}
 		counter = counter + 1
@@ -77,7 +78,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 		fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter)

 		value := cbg.CborInt(counter)
-		if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
 			return 0, nil, nil, err
 		}
 		counter = counter + 1
@@ -95,7 +96,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 			return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
 		}
 		value := cbg.CborInt(80)
-		if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
 			return 0, nil, nil, err
 		}
 	} else if rootVerifier.Type == genesis.TMultisig {
@@ -110,7 +111,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 		fmt.Printf("init set %s t0%d\n", e, counter)

 		value := cbg.CborInt(counter)
-		if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(e), &value); err != nil {
 			return 0, nil, nil, err
 		}
 		counter = counter + 1
@@ -3,7 +3,7 @@ package genesis
 import (
 	"context"

-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/big"

 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/reward"
@@ -4,7 +4,7 @@ import (
 	"context"

 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/go-state-types/abi"
 	cbg "github.com/whyrusleeping/cbor-gen"
 	"golang.org/x/xerrors"

@@ -3,7 +3,7 @@ package gen
 import (
 	"context"

-	"github.com/filecoin-project/specs-actors/actors/crypto"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
 	cid "github.com/ipfs/go-cid"
 	cbor "github.com/ipfs/go-ipld-cbor"
@@ -9,8 +9,8 @@ import (
 	ds "github.com/ipfs/go-datastore"
 	"github.com/ipfs/go-datastore/namespace"

+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/specs-actors/actors/abi"
 )

 type SlashFilter struct {
@@ -4,8 +4,8 @@ import (
 	"context"
 	"sync"

-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"go.uber.org/fx"

 	"github.com/filecoin-project/specs-actors/actors/builtin"
@@ -10,9 +10,9 @@ import (
 	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
 	tutils "github.com/filecoin-project/specs-actors/support/testing"

 	"github.com/filecoin-project/lotus/api"
@@ -9,7 +9,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"

 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 )

|
@ -11,8 +11,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
@ -31,6 +32,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
|
"github.com/filecoin-project/lotus/journal"
|
||||||
"github.com/filecoin-project/lotus/lib/sigs"
|
"github.com/filecoin-project/lotus/lib/sigs"
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
|
||||||
@ -46,12 +48,16 @@ var rbfDenomBig = types.NewInt(RbfDenom)
|
|||||||
|
|
||||||
const RbfDenom = 256
|
const RbfDenom = 256
|
||||||
|
|
||||||
var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second
|
var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
|
||||||
|
|
||||||
var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
|
var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
|
||||||
|
var baseFeeLowerBoundFactor = types.NewInt(10)
|
||||||
|
var baseFeeLowerBoundFactorConservative = types.NewInt(100)
|
||||||
|
|
||||||
var MaxActorPendingMessages = 1000
|
var MaxActorPendingMessages = 1000
|
||||||
|
|
||||||
|
var MaxNonceGap = uint64(4)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrMessageTooBig = errors.New("message too big")
|
ErrMessageTooBig = errors.New("message too big")
|
||||||
|
|
||||||
@ -68,6 +74,7 @@ var (
|
|||||||
ErrSoftValidationFailure = errors.New("validation failure")
|
ErrSoftValidationFailure = errors.New("validation failure")
|
||||||
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
||||||
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
|
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
|
||||||
|
ErrNonceGap = errors.New("unfulfilled nonce gap")
|
||||||
|
|
||||||
ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
|
ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
|
||||||
)
|
)
|
||||||
@ -78,6 +85,34 @@ const (
|
|||||||
localUpdates = "update"
|
localUpdates = "update"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Journal event types.
|
||||||
|
const (
|
||||||
|
evtTypeMpoolAdd = iota
|
||||||
|
evtTypeMpoolRemove
|
||||||
|
evtTypeMpoolRepub
|
||||||
|
)
|
||||||
|
|
||||||
|
// MessagePoolEvt is the journal entry for message pool events.
|
||||||
|
type MessagePoolEvt struct {
|
||||||
|
Action string
|
||||||
|
Messages []MessagePoolEvtMessage
|
||||||
|
Error error `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type MessagePoolEvtMessage struct {
|
||||||
|
types.Message
|
||||||
|
|
||||||
|
CID cid.Cid
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// if the republish interval is too short compared to the pubsub timecache, adjust it
|
||||||
|
minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)
|
||||||
|
if RepublishInterval < minInterval {
|
||||||
|
RepublishInterval = minInterval
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type MessagePool struct {
|
type MessagePool struct {
|
||||||
lk sync.Mutex
|
lk sync.Mutex
|
||||||
|
|
||||||
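RepublishInterval is now derived purely from chain timing, and the init hook above clamps it so republished messages are not still sitting in peers' pubsub seen-message caches. A hedged arithmetic sketch; the 30s block delay, 6s propagation delay and 120s cache window are illustrative assumptions, not values taken from this change:

// republishIntervalSketch mirrors the interval arithmetic above with
// illustrative constants (the real values come from the build and pubsub
// packages).
func republishIntervalSketch() time.Duration {
	const (
		blockDelaySecs       = 30                // assumed
		propagationDelaySecs = 6                 // assumed
		seenMessageCache     = 120 * time.Second // assumed pubsub TimeCacheDuration
	)

	interval := time.Duration(10*blockDelaySecs+propagationDelaySecs) * time.Second // 306s

	// Analogous clamp to the init() hook: never republish on a shorter period
	// than the pubsub seen-message window, or peers would drop the republished
	// message as a duplicate.
	if lower := seenMessageCache + propagationDelaySecs*time.Second; interval < lower {
		interval = lower
	}
	return interval
}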
@@ -123,6 +158,8 @@ type MessagePool struct {
 	netName dtypes.NetworkName

 	sigValCache *lru.TwoQueueCache
+
+	evtTypes [3]journal.EventType
 }

 type msgSet struct {
@@ -131,24 +168,63 @@ type msgSet struct {
 	requiredFunds *stdbig.Int
 }

-func newMsgSet() *msgSet {
+func newMsgSet(nonce uint64) *msgSet {
 	return &msgSet{
 		msgs:          make(map[uint64]*types.SignedMessage),
+		nextNonce:     nonce,
 		requiredFunds: stdbig.NewInt(0),
 	}
 }

-func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, limit bool) (bool, error) {
-	if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce {
-		ms.nextNonce = m.Message.Nonce + 1
+func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount {
+	minPrice := types.BigAdd(curPrem, types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig))
+	return types.BigAdd(minPrice, types.NewInt(1))
+}
+
+func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) {
+	if maxFee.Equals(big.Zero()) {
+		maxFee = types.NewInt(build.FilecoinPrecision / 10)
 	}

+	gl := types.NewInt(uint64(msg.GasLimit))
+	totalFee := types.BigMul(msg.GasFeeCap, gl)
+
+	if totalFee.LessThanEqual(maxFee) {
+		return
+	}
+
+	msg.GasFeeCap = big.Div(maxFee, gl)
+	msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
+}
+
+func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (bool, error) {
+	nextNonce := ms.nextNonce
+	nonceGap := false
+	switch {
+	case m.Message.Nonce == nextNonce:
+		nextNonce++
+		// advance if we are filling a gap
+		for _, fillGap := ms.msgs[nextNonce]; fillGap; _, fillGap = ms.msgs[nextNonce] {
+			nextNonce++
+		}
+
+	case strict && m.Message.Nonce > nextNonce+MaxNonceGap:
+		return false, xerrors.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)
+
+	case m.Message.Nonce > nextNonce:
+		nonceGap = true
+	}
+
 	exms, has := ms.msgs[m.Message.Nonce]
 	if has {
+		// refuse RBF if we have a gap
+		if strict && nonceGap {
+			return false, xerrors.Errorf("rejecting replace by fee because of nonce gap (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)
+		}
+
 		if m.Cid() != exms.Cid() {
 			// check if RBF passes
-			minPrice := exms.Message.GasPremium
-			minPrice = types.BigAdd(minPrice, types.BigDiv(types.BigMul(minPrice, rbfNumBig), rbfDenomBig))
-			minPrice = types.BigAdd(minPrice, types.NewInt(1))
+			minPrice := ComputeMinRBF(exms.Message.GasPremium)
 			if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
 				log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
 					"newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
|
|||||||
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
|
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
|
||||||
ErrRBFTooLowPremium)
|
ErrRBFTooLowPremium)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
return false, xerrors.Errorf("message from %s with nonce %d already in mpool: %w",
|
||||||
|
m.Message.From, m.Message.Nonce, ErrSoftValidationFailure)
|
||||||
}
|
}
|
||||||
|
|
||||||
ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
|
ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
|
||||||
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
|
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !has && limit && len(ms.msgs) > MaxActorPendingMessages {
|
if !has && strict && len(ms.msgs) > MaxActorPendingMessages {
|
||||||
log.Errorf("too many pending messages from actor %s", m.Message.From)
|
log.Errorf("too many pending messages from actor %s", m.Message.From)
|
||||||
return false, ErrTooManyPendingMessages
|
return false, ErrTooManyPendingMessages
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if strict && nonceGap {
|
||||||
|
log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
|
||||||
|
m.Message.From, m.Message.Nonce, nextNonce)
|
||||||
|
}
|
||||||
|
|
||||||
|
ms.nextNonce = nextNonce
|
||||||
ms.msgs[m.Message.Nonce] = m
|
ms.msgs[m.Message.Nonce] = m
|
||||||
ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
||||||
//ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int)
|
//ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int)
|
||||||
@ -177,12 +262,38 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, limit bool) (bool
|
|||||||
return !has, nil
|
return !has, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *msgSet) rm(nonce uint64) {
|
func (ms *msgSet) rm(nonce uint64, applied bool) {
|
||||||
m, has := ms.msgs[nonce]
|
m, has := ms.msgs[nonce]
|
||||||
if has {
|
if !has {
|
||||||
ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
if applied && nonce >= ms.nextNonce {
|
||||||
//ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int)
|
// we removed a message we did not know about because it was applied
|
||||||
delete(ms.msgs, nonce)
|
// we need to adjust the nonce and check if we filled a gap
|
||||||
|
ms.nextNonce = nonce + 1
|
||||||
|
for _, fillGap := ms.msgs[ms.nextNonce]; fillGap; _, fillGap = ms.msgs[ms.nextNonce] {
|
||||||
|
ms.nextNonce++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
||||||
|
//ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int)
|
||||||
|
delete(ms.msgs, nonce)
|
||||||
|
|
||||||
|
// adjust next nonce
|
||||||
|
if applied {
|
||||||
|
// we removed a (known) message because it was applied in a tipset
|
||||||
|
// we can't possibly have filled a gap in this case
|
||||||
|
if nonce >= ms.nextNonce {
|
||||||
|
ms.nextNonce = nonce + 1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// we removed a message because it was pruned
|
||||||
|
// we have to adjust the nonce if it creates a gap or rewinds state
|
||||||
|
if nonce < ms.nextNonce {
|
||||||
|
ms.nextNonce = nonce
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -225,6 +336,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
|
|||||||
api: api,
|
api: api,
|
||||||
netName: netName,
|
netName: netName,
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
|
evtTypes: [...]journal.EventType{
|
||||||
|
evtTypeMpoolAdd: journal.J.RegisterEventType("mpool", "add"),
|
||||||
|
evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"),
|
||||||
|
evtTypeMpoolRepub: journal.J.RegisterEventType("mpool", "repub"),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// enable initial prunes
|
// enable initial prunes
|
||||||
@ -276,10 +392,12 @@ func (mp *MessagePool) runLoop() {
|
|||||||
if err := mp.republishPendingMessages(); err != nil {
|
if err := mp.republishPendingMessages(); err != nil {
|
||||||
log.Errorf("error while republishing messages: %s", err)
|
log.Errorf("error while republishing messages: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-mp.pruneTrigger:
|
case <-mp.pruneTrigger:
|
||||||
if err := mp.pruneExcessMessages(); err != nil {
|
if err := mp.pruneExcessMessages(); err != nil {
|
||||||
log.Errorf("failed to prune excess messages from mempool: %s", err)
|
log.Errorf("failed to prune excess messages from mempool: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-mp.closer:
|
case <-mp.closer:
|
||||||
mp.repubTk.Stop()
|
mp.repubTk.Stop()
|
||||||
return
|
return
|
||||||
@ -297,13 +415,57 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, epoch abi.ChainEpoch) error {
|
// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio
|
||||||
|
// and whether the message has enough funds to be included in the next 20 blocks.
|
||||||
|
// If the message is not valid for block inclusion, it returns an error.
|
||||||
|
// For local messages, if the message can be included in the next 20 blocks, it returns true to
|
||||||
|
// signal that it should be immediately published. If the message cannot be included in the next 20
|
||||||
|
// blocks, it returns false so that the message doesn't immediately get published (and ignored by our
|
||||||
|
// peers); instead it will be published through the republish loop, once the base fee has fallen
|
||||||
|
// sufficiently.
|
||||||
|
// For non local messages, if the message cannot be included in the next 20 blocks it returns
|
||||||
|
// a (soft) validation error.
|
||||||
|
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
|
||||||
|
epoch := curTs.Height()
|
||||||
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
|
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
|
||||||
|
|
||||||
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
|
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
|
||||||
return xerrors.Errorf("message will not be included in a block: %w", err)
|
return false, xerrors.Errorf("message will not be included in a block: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
// this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks
|
||||||
|
// if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely
|
||||||
|
// on republish to push it through later, if the baseFee has fallen.
|
||||||
|
// this is a defensive check that stops minimum baseFee spam attacks from overloading validation
|
||||||
|
// queues.
|
||||||
|
// Note that for local messages, we always add them so that they can be accepted and republished
|
||||||
|
// automatically.
|
||||||
|
publish := local
|
||||||
|
|
||||||
|
var baseFee big.Int
|
||||||
|
if len(curTs.Blocks()) > 0 {
|
||||||
|
baseFee = curTs.Blocks()[0].ParentBaseFee
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs)
|
||||||
|
if err != nil {
|
||||||
|
return false, xerrors.Errorf("computing basefee: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative)
|
||||||
|
if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
|
||||||
|
if local {
|
||||||
|
log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
|
||||||
|
m.Message.GasFeeCap, baseFeeLowerBound)
|
||||||
|
publish = false
|
||||||
|
} else {
|
||||||
|
return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w",
|
||||||
|
m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return publish, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
|
func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
|
||||||
@ -324,7 +486,8 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
if err := mp.addTs(m, mp.curTs); err != nil {
|
publish, err := mp.addTs(m, mp.curTs, true)
|
||||||
|
if err != nil {
|
||||||
mp.curTsLk.Unlock()
|
mp.curTsLk.Unlock()
|
||||||
return cid.Undef, err
|
return cid.Undef, err
|
||||||
}
|
}
|
||||||
@ -337,7 +500,11 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
|
|||||||
}
|
}
|
||||||
mp.lk.Unlock()
|
mp.lk.Unlock()
|
||||||
|
|
||||||
return m.Cid(), mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
|
if publish {
|
||||||
|
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.Cid(), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
|
func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
|
||||||
@ -385,7 +552,9 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
|
|||||||
|
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
defer mp.curTsLk.Unlock()
|
defer mp.curTsLk.Unlock()
|
||||||
return mp.addTs(m, mp.curTs)
|
|
||||||
|
_, err = mp.addTs(m, mp.curTs, false)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func sigCacheKey(m *types.SignedMessage) (string, error) {
|
func sigCacheKey(m *types.SignedMessage) (string, error) {
|
||||||
@ -452,7 +621,42 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error {
|
func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
|
||||||
|
snonce, err := mp.getStateNonce(m.Message.From, curTs)
|
||||||
|
if err != nil {
|
||||||
|
return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
|
||||||
|
}
|
||||||
|
|
||||||
|
if snonce > m.Message.Nonce {
|
||||||
|
return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.lk.Lock()
|
||||||
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
|
publish, err := mp.verifyMsgBeforeAdd(m, curTs, local)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := mp.checkBalance(m, curTs); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return publish, mp.addLocked(m, !local)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
|
||||||
|
err := mp.checkMessage(m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.curTsLk.Lock()
|
||||||
|
defer mp.curTsLk.Unlock()
|
||||||
|
|
||||||
|
curTs := mp.curTs
|
||||||
|
|
||||||
snonce, err := mp.getStateNonce(m.Message.From, curTs)
|
snonce, err := mp.getStateNonce(m.Message.From, curTs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
|
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
|
||||||
@ -465,7 +669,8 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error
|
|||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
if err := mp.verifyMsgBeforeAdd(m, curTs.Height()); err != nil {
|
_, err = mp.verifyMsgBeforeAdd(m, curTs, true)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -473,7 +678,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return mp.addLocked(m, true)
|
return mp.addLocked(m, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
|
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
|
||||||
@ -483,7 +688,7 @@ func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
|
|||||||
return mp.addLocked(m, false)
|
return mp.addLocked(m, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addLocked(m *types.SignedMessage, limit bool) error {
|
func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
|
||||||
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
|
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
|
||||||
if m.Signature.Type == crypto.SigTypeBLS {
|
if m.Signature.Type == crypto.SigTypeBLS {
|
||||||
mp.blsSigCache.Add(m.Cid(), m.Signature)
|
mp.blsSigCache.Add(m.Cid(), m.Signature)
|
||||||
@ -501,13 +706,18 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, limit bool) error {
|
|||||||
|
|
||||||
mset, ok := mp.pending[m.Message.From]
|
mset, ok := mp.pending[m.Message.From]
|
||||||
if !ok {
|
if !ok {
|
||||||
mset = newMsgSet()
|
nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get initial actor nonce: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mset = newMsgSet(nonce)
|
||||||
mp.pending[m.Message.From] = mset
|
mp.pending[m.Message.From] = mset
|
||||||
}
|
}
|
||||||
|
|
||||||
incr, err := mset.add(m, mp, limit)
|
incr, err := mset.add(m, mp, strict)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info(err)
|
log.Debug(err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -526,6 +736,14 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, limit bool) error {
|
|||||||
Type: api.MpoolAdd,
|
Type: api.MpoolAdd,
|
||||||
Message: m,
|
Message: m,
|
||||||
}, localUpdates)
|
}, localUpdates)
|
||||||
|
|
||||||
|
journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} {
|
||||||
|
return MessagePoolEvt{
|
||||||
|
Action: "add",
|
||||||
|
Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}},
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -646,7 +864,8 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
|
|||||||
return nil, ErrTryAgain
|
return nil, ErrTryAgain
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.verifyMsgBeforeAdd(msg, curTs.Height()); err != nil {
|
publish, err := mp.verifyMsgBeforeAdd(msg, curTs, true)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -654,24 +873,28 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.addLocked(msg, true); err != nil {
|
if err := mp.addLocked(msg, false); err != nil {
|
||||||
return nil, xerrors.Errorf("add locked failed: %w", err)
|
return nil, xerrors.Errorf("add locked failed: %w", err)
|
||||||
}
|
}
|
||||||
if err := mp.addLocal(msg, msgb); err != nil {
|
if err := mp.addLocal(msg, msgb); err != nil {
|
||||||
log.Errorf("addLocal failed: %+v", err)
|
log.Errorf("addLocal failed: %+v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg, mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
|
if publish {
|
||||||
|
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) Remove(from address.Address, nonce uint64) {
|
func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) {
|
||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
mp.remove(from, nonce)
|
mp.remove(from, nonce, applied)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) remove(from address.Address, nonce uint64) {
|
func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) {
|
||||||
mset, ok := mp.pending[from]
|
mset, ok := mp.pending[from]
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
@ -683,27 +906,21 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64) {
|
|||||||
Message: m,
|
Message: m,
|
||||||
}, localUpdates)
|
}, localUpdates)
|
||||||
|
|
||||||
|
journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} {
|
||||||
|
return MessagePoolEvt{
|
||||||
|
Action: "remove",
|
||||||
|
Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}}
|
||||||
|
})
|
||||||
|
|
||||||
mp.currentSize--
|
mp.currentSize--
|
||||||
}
|
}
|
||||||
|
|
||||||
// NB: This deletes any message with the given nonce. This makes sense
|
// NB: This deletes any message with the given nonce. This makes sense
|
||||||
// as two messages with the same sender cannot have the same nonce
|
// as two messages with the same sender cannot have the same nonce
|
||||||
mset.rm(nonce)
|
mset.rm(nonce, applied)
|
||||||
|
|
||||||
if len(mset.msgs) == 0 {
|
if len(mset.msgs) == 0 {
|
||||||
delete(mp.pending, from)
|
delete(mp.pending, from)
|
||||||
} else {
|
|
||||||
var max uint64
|
|
||||||
for nonce := range mset.msgs {
|
|
||||||
if max < nonce {
|
|
||||||
max = nonce
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if max < nonce {
|
|
||||||
max = nonce // we could have not seen the removed message before
|
|
||||||
}
|
|
||||||
|
|
||||||
mset.nextNonce = max + 1
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -771,7 +988,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
rm := func(from address.Address, nonce uint64) {
|
rm := func(from address.Address, nonce uint64) {
|
||||||
s, ok := rmsgs[from]
|
s, ok := rmsgs[from]
|
||||||
if !ok {
|
if !ok {
|
||||||
mp.Remove(from, nonce)
|
mp.Remove(from, nonce, true)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -780,7 +997,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
mp.Remove(from, nonce)
|
mp.Remove(from, nonce, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
maybeRepub := func(cid cid.Cid) {
|
maybeRepub := func(cid cid.Cid) {
|
||||||
@ -1082,7 +1299,7 @@ func (mp *MessagePool) loadLocal() error {
|
|||||||
return xerrors.Errorf("unmarshaling local message: %w", err)
|
return xerrors.Errorf("unmarshaling local message: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.Add(&sm); err != nil {
|
if err := mp.addLoaded(&sm); err != nil {
|
||||||
if xerrors.Is(err, ErrNonceTooLow) {
|
if xerrors.Is(err, ErrNonceTooLow) {
|
||||||
continue // todo: drop the message from local cache (if above certain confidence threshold)
|
continue // todo: drop the message from local cache (if above certain confidence threshold)
|
||||||
}
|
}
|
||||||
@ -1132,3 +1349,12 @@ func (mp *MessagePool) Clear(local bool) {
|
|||||||
delete(mp.pending, a)
|
delete(mp.pending, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
|
||||||
|
baseFeeLowerBound := types.BigDiv(baseFee, factor)
|
||||||
|
if baseFeeLowerBound.LessThan(minimumBaseFee) {
|
||||||
|
baseFeeLowerBound = minimumBaseFee
|
||||||
|
}
|
||||||
|
|
||||||
|
return baseFeeLowerBound
|
||||||
|
}
|
||||||
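For reference, the new getBaseFeeLowerBound helper above is simply the current base fee divided by a factor, floored at the network minimum. A minimal standalone sketch using math/big in place of the lotus types.BigInt wrapper; the factor and minimum values here are illustrative stand-ins, not the values the message pool actually uses:

package main

import (
	"fmt"
	"math/big"
)

// Stand-ins for the real package-level values (assumed, for illustration only).
var (
	baseFeeLowerBoundFactor = big.NewInt(10)
	minimumBaseFee          = big.NewInt(100)
)

// baseFeeLowerBound mirrors getBaseFeeLowerBound: baseFee/factor, floored at the minimum.
func baseFeeLowerBound(baseFee *big.Int) *big.Int {
	lb := new(big.Int).Div(baseFee, baseFeeLowerBoundFactor)
	if lb.Cmp(minimumBaseFee) < 0 {
		lb = minimumBaseFee
	}
	return lb
}

func main() {
	fmt.Println(baseFeeLowerBound(big.NewInt(800_000_000))) // 80000000
	fmt.Println(baseFeeLowerBound(big.NewInt(500)))         // floored to 100
}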
|
@ -7,6 +7,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
@ -14,7 +16,6 @@ import (
|
|||||||
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
|
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||||
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
|
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
@ -34,6 +35,8 @@ type testMpoolAPI struct {
|
|||||||
tipsets []*types.TipSet
|
tipsets []*types.TipSet
|
||||||
|
|
||||||
published int
|
published int
|
||||||
|
|
||||||
|
baseFee types.BigInt
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestMpoolAPI() *testMpoolAPI {
|
func newTestMpoolAPI() *testMpoolAPI {
|
||||||
@ -41,6 +44,7 @@ func newTestMpoolAPI() *testMpoolAPI {
|
|||||||
bmsgs: make(map[cid.Cid][]*types.SignedMessage),
|
bmsgs: make(map[cid.Cid][]*types.SignedMessage),
|
||||||
statenonce: make(map[address.Address]uint64),
|
statenonce: make(map[address.Address]uint64),
|
||||||
balance: make(map[address.Address]types.BigInt),
|
balance: make(map[address.Address]types.BigInt),
|
||||||
|
baseFee: types.NewInt(100),
|
||||||
}
|
}
|
||||||
genesis := mock.MkBlock(nil, 1, 1)
|
genesis := mock.MkBlock(nil, 1, 1)
|
||||||
tma.tipsets = append(tma.tipsets, mock.TipSet(genesis))
|
tma.tipsets = append(tma.tipsets, mock.TipSet(genesis))
|
||||||
@ -53,6 +57,13 @@ func (tma *testMpoolAPI) nextBlock() *types.BlockHeader {
|
|||||||
return newBlk
|
return newBlk
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tma *testMpoolAPI) nextBlockWithHeight(height uint64) *types.BlockHeader {
|
||||||
|
newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1)
|
||||||
|
newBlk.Height = abi.ChainEpoch(height)
|
||||||
|
tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk))
|
||||||
|
return newBlk
|
||||||
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
|
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil {
|
if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil {
|
||||||
@ -182,7 +193,7 @@ func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
|
func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
|
||||||
return types.NewInt(100), nil
|
return tma.baseFee, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
|
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
|
||||||
@ -352,6 +363,12 @@ func TestRevertMessages(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPruningSimple(t *testing.T) {
|
func TestPruningSimple(t *testing.T) {
|
||||||
|
oldMaxNonceGap := MaxNonceGap
|
||||||
|
MaxNonceGap = 1000
|
||||||
|
defer func() {
|
||||||
|
MaxNonceGap = oldMaxNonceGap
|
||||||
|
}()
|
||||||
|
|
||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
|
|
||||||
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
@ -46,13 +46,21 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("computing basefee: %w", err)
|
return xerrors.Errorf("computing basefee: %w", err)
|
||||||
}
|
}
|
||||||
|
baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
|
||||||
|
|
||||||
pending, _ := mp.getPendingMessages(ts, ts)
|
pending, _ := mp.getPendingMessages(ts, ts)
|
||||||
|
|
||||||
// priority actors -- not pruned
|
// protected actors -- not pruned
|
||||||
priority := make(map[address.Address]struct{})
|
protected := make(map[address.Address]struct{})
|
||||||
|
|
||||||
|
// we never prune priority addresses
|
||||||
for _, actor := range mp.cfg.PriorityAddrs {
|
for _, actor := range mp.cfg.PriorityAddrs {
|
||||||
priority[actor] = struct{}{}
|
protected[actor] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// we also never prune locally published messages
|
||||||
|
for actor := range mp.localAddrs {
|
||||||
|
protected[actor] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect all messages to track which ones to remove and create chains for block inclusion
|
// Collect all messages to track which ones to remove and create chains for block inclusion
|
||||||
@ -61,18 +69,18 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
|
|||||||
|
|
||||||
var chains []*msgChain
|
var chains []*msgChain
|
||||||
for actor, mset := range pending {
|
for actor, mset := range pending {
|
||||||
// we never prune priority actors
|
// we never prune protected actors
|
||||||
_, keep := priority[actor]
|
_, keep := protected[actor]
|
||||||
if keep {
|
if keep {
|
||||||
keepCount += len(mset)
|
keepCount += len(mset)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// not a priority actor, track the messages and create chains
|
// not a protected actor, track the messages and create chains
|
||||||
for _, m := range mset {
|
for _, m := range mset {
|
||||||
pruneMsgs[m.Message.Cid()] = m
|
pruneMsgs[m.Message.Cid()] = m
|
||||||
}
|
}
|
||||||
actorChains := mp.createMessageChains(actor, mset, baseFee, ts)
|
actorChains := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
|
||||||
chains = append(chains, actorChains...)
|
chains = append(chains, actorChains...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,7 +106,7 @@ keepLoop:
|
|||||||
// and remove all messages that are still in pruneMsgs after processing the chains
|
// and remove all messages that are still in pruneMsgs after processing the chains
|
||||||
log.Infof("Pruning %d messages", len(pruneMsgs))
|
log.Infof("Pruning %d messages", len(pruneMsgs))
|
||||||
for _, m := range pruneMsgs {
|
for _, m := range pruneMsgs {
|
||||||
mp.remove(m.Message.From, m.Message.Nonce)
|
mp.remove(m.Message.From, m.Message.Nonce, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -3,6 +3,7 @@ package messagepool
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"sort"
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
@ -10,11 +11,14 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/journal"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
)
|
)
|
||||||
|
|
||||||
const repubMsgLimit = 30
|
const repubMsgLimit = 30
|
||||||
|
|
||||||
|
var RepublishBatchDelay = 100 * time.Millisecond
|
||||||
|
|
||||||
func (mp *MessagePool) republishPendingMessages() error {
|
func (mp *MessagePool) republishPendingMessages() error {
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
ts := mp.curTs
|
ts := mp.curTs
|
||||||
@ -24,6 +28,7 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
mp.curTsLk.Unlock()
|
mp.curTsLk.Unlock()
|
||||||
return xerrors.Errorf("computing basefee: %w", err)
|
return xerrors.Errorf("computing basefee: %w", err)
|
||||||
}
|
}
|
||||||
|
baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
|
||||||
|
|
||||||
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
|
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
|
||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
@ -52,7 +57,11 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
|
|
||||||
var chains []*msgChain
|
var chains []*msgChain
|
||||||
for actor, mset := range pending {
|
for actor, mset := range pending {
|
||||||
next := mp.createMessageChains(actor, mset, baseFee, ts)
|
// We use the baseFee lower bound for createChange so that we optimistically include
|
||||||
|
// chains that might become profitable in the next 20 blocks.
|
||||||
|
// We still check the lowerBound condition for individual messages so that we don't send
|
||||||
|
// messages that will be rejected by the mpool spam protector, so this is safe to do.
|
||||||
|
next := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
|
||||||
chains = append(chains, next...)
|
chains = append(chains, next...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -64,15 +73,10 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
return chains[i].Before(chains[j])
|
return chains[i].Before(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
// we don't republish negative performing chains; this is an error that will be screamed
|
|
||||||
// at the user
|
|
||||||
if chains[0].gasPerf < 0 {
|
|
||||||
return xerrors.Errorf("skipping republish: all message chains have negative gas performance; best gas performance: %f", chains[0].gasPerf)
|
|
||||||
}
|
|
||||||
|
|
||||||
gasLimit := int64(build.BlockGasLimit)
|
gasLimit := int64(build.BlockGasLimit)
|
||||||
minGas := int64(gasguess.MinGas)
|
minGas := int64(gasguess.MinGas)
|
||||||
var msgs []*types.SignedMessage
|
var msgs []*types.SignedMessage
|
||||||
|
loop:
|
||||||
for i := 0; i < len(chains); {
|
for i := 0; i < len(chains); {
|
||||||
chain := chains[i]
|
chain := chains[i]
|
||||||
|
|
||||||
@ -86,12 +90,6 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
// we don't republish negative performing chains, as they won't be included in
|
|
||||||
// a block anyway
|
|
||||||
if chain.gasPerf < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// has the chain been invalidated?
|
// has the chain been invalidated?
|
||||||
if !chain.valid {
|
if !chain.valid {
|
||||||
i++
|
i++
|
||||||
@ -100,15 +98,25 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
|
|
||||||
// does it fit in a block?
|
// does it fit in a block?
|
||||||
if chain.gasLimit <= gasLimit {
|
if chain.gasLimit <= gasLimit {
|
||||||
gasLimit -= chain.gasLimit
|
// check the baseFee lower bound -- only republish messages that can be included in the chain
|
||||||
msgs = append(msgs, chain.msgs...)
|
// within the next 20 blocks.
|
||||||
|
for _, m := range chain.msgs {
|
||||||
|
if !allowNegativeChains(ts.Height()) && m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
|
||||||
|
chain.Invalidate()
|
||||||
|
continue loop
|
||||||
|
}
|
||||||
|
gasLimit -= m.Message.GasLimit
|
||||||
|
msgs = append(msgs, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// we processed the whole chain, advance
|
||||||
i++
|
i++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// we can't fit the current chain but there is gas to spare
|
// we can't fit the current chain but there is gas to spare
|
||||||
// trim it and push it down
|
// trim it and push it down
|
||||||
chain.Trim(gasLimit, mp, baseFee, ts)
|
chain.Trim(gasLimit, mp, baseFee, true)
|
||||||
for j := i; j < len(chains)-1; j++ {
|
for j := i; j < len(chains)-1; j++ {
|
||||||
if chains[j].Before(chains[j+1]) {
|
if chains[j].Before(chains[j+1]) {
|
||||||
break
|
break
|
||||||
@ -131,6 +139,25 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
count++
|
count++
|
||||||
|
|
||||||
|
if count < len(msgs) {
|
||||||
|
// this delay is here to encourage the pubsub subsystem to process the messages serially
|
||||||
|
// and avoid creating nonce gaps because of concurrent validation.
|
||||||
|
time.Sleep(RepublishBatchDelay)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) > 0 {
|
||||||
|
journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
|
||||||
|
msgs := make([]MessagePoolEvtMessage, 0, len(msgs))
|
||||||
|
for _, m := range msgs {
|
||||||
|
msgs = append(msgs, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()})
|
||||||
|
}
|
||||||
|
return MessagePoolEvt{
|
||||||
|
Action: "repub",
|
||||||
|
Messages: msgs,
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// track most recently republished messages
|
// track most recently republished messages
|
||||||
|
@ -4,14 +4,20 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/wallet"
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRepubMessages(t *testing.T) {
|
func TestRepubMessages(t *testing.T) {
|
||||||
|
oldRepublishBatchDelay := RepublishBatchDelay
|
||||||
|
RepublishBatchDelay = time.Microsecond
|
||||||
|
defer func() {
|
||||||
|
RepublishBatchDelay = oldRepublishBatchDelay
|
||||||
|
}()
|
||||||
|
|
||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
@ -3,21 +3,29 @@ package messagepool
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
tbig "github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
abig "github.com/filecoin-project/specs-actors/actors/abi/big"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
|
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
|
||||||
|
|
||||||
|
// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
|
||||||
|
// away in the next fork.
|
||||||
|
func allowNegativeChains(epoch abi.ChainEpoch) bool {
|
||||||
|
return epoch < build.UpgradeBreezeHeight+5
|
||||||
|
}
|
||||||
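Note on the temporary gate above: allowNegativeChains returns true only for the first five epochs after the Breeze upgrade, so every !allowNegativeChains(...) && gasPerf < 0 guard introduced in this diff behaves exactly like the old unconditional gasPerf < 0 check once the chain passes UpgradeBreezeHeight+5; until then, negative-performance chains are tolerated in selection, trimming and republish.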
|
|
||||||
const MaxBlocks = 15
|
const MaxBlocks = 15
|
||||||
|
|
||||||
type msgChain struct {
|
type msgChain struct {
|
||||||
@ -100,9 +108,9 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
|
|||||||
return chains[i].Before(chains[j])
|
return chains[i].Before(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
if len(chains) != 0 && chains[0].gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
|
||||||
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
|
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
|
||||||
return nil, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Partition chains into blocks (without trimming)
|
// 3. Partition chains into blocks (without trimming)
|
||||||
@ -153,7 +161,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
|
|||||||
last := len(chains)
|
last := len(chains)
|
||||||
for i, chain := range chains {
|
for i, chain := range chains {
|
||||||
// did we run out of performing chains?
|
// did we run out of performing chains?
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -191,9 +199,11 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
|
|||||||
gasLimit -= chainGasLimit
|
gasLimit -= chainGasLimit
|
||||||
|
|
||||||
// resort to account for already merged chains and effective performance adjustments
|
// resort to account for already merged chains and effective performance adjustments
|
||||||
sort.Slice(chains[i+1:], func(i, j int) bool {
|
// the sort *must* be stable or we end up getting negative gasPerfs pushed up.
|
||||||
|
sort.SliceStable(chains[i+1:], func(i, j int) bool {
|
||||||
return chains[i].BeforeEffective(chains[j])
|
return chains[i].BeforeEffective(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -217,7 +227,7 @@ tailLoop:
|
|||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim if necessary
|
// trim if necessary
|
||||||
if chains[last].gasLimit > gasLimit {
|
if chains[last].gasLimit > gasLimit {
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
@ -243,7 +253,7 @@ tailLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// if gasPerf < 0 we have no more profitable chains
|
// if gasPerf < 0 we have no more profitable chains
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
|
||||||
break tailLoop
|
break tailLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -284,7 +294,7 @@ tailLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// dependencies fit, just trim it
|
// dependencies fit, just trim it
|
||||||
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts)
|
chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
|
||||||
last += i
|
last += i
|
||||||
continue tailLoop
|
continue tailLoop
|
||||||
}
|
}
|
||||||
@ -297,6 +307,79 @@ tailLoop:
|
|||||||
log.Infow("pack tail chains done", "took", dt)
|
log.Infow("pack tail chains done", "took", dt)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// if we have gasLimit to spare, pick some random (non-negative) chains to fill the block
|
||||||
|
// we pick randomly so that we minimize the probability of duplication among all miners
|
||||||
|
if gasLimit >= minGas {
|
||||||
|
randomCount := 0
|
||||||
|
|
||||||
|
startRandom := time.Now()
|
||||||
|
shuffleChains(chains)
|
||||||
|
|
||||||
|
for _, chain := range chains {
|
||||||
|
// have we filled the block
|
||||||
|
if gasLimit < minGas {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// has it been merged or invalidated?
|
||||||
|
if chain.merged || !chain.valid {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// is it negative?
|
||||||
|
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// compute the dependencies that must be merged and the gas limit including deps
|
||||||
|
chainGasLimit := chain.gasLimit
|
||||||
|
depGasLimit := int64(0)
|
||||||
|
var chainDeps []*msgChain
|
||||||
|
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
|
||||||
|
chainDeps = append(chainDeps, curChain)
|
||||||
|
chainGasLimit += curChain.gasLimit
|
||||||
|
depGasLimit += curChain.gasLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
// do the deps fit? if the deps won't fit, invalidate the chain
|
||||||
|
if depGasLimit > gasLimit {
|
||||||
|
chain.Invalidate()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// do they fit as is? if it doesn't, trim to make it fit if possible
|
||||||
|
if chainGasLimit > gasLimit {
|
||||||
|
chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
|
||||||
|
|
||||||
|
if !chain.valid {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// include it together with all dependencies
|
||||||
|
for i := len(chainDeps) - 1; i >= 0; i-- {
|
||||||
|
curChain := chainDeps[i]
|
||||||
|
curChain.merged = true
|
||||||
|
result = append(result, curChain.msgs...)
|
||||||
|
randomCount += len(curChain.msgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
chain.merged = true
|
||||||
|
result = append(result, chain.msgs...)
|
||||||
|
randomCount += len(chain.msgs)
|
||||||
|
gasLimit -= chainGasLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
if dt := time.Since(startRandom); dt > time.Millisecond {
|
||||||
|
log.Infow("pack random tail chains done", "took", dt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if randomCount > 0 {
|
||||||
|
log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection",
|
||||||
|
randomCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -349,9 +432,9 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
|
|||||||
return chains[i].Before(chains[j])
|
return chains[i].Before(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
if len(chains) != 0 && chains[0].gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
|
||||||
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
|
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
|
||||||
return nil, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Merge the head chains to produce the list of messages selected for inclusion, subject to
|
// 3. Merge the head chains to produce the list of messages selected for inclusion, subject to
|
||||||
@ -360,7 +443,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
|
|||||||
last := len(chains)
|
last := len(chains)
|
||||||
for i, chain := range chains {
|
for i, chain := range chains {
|
||||||
// did we run out of performing chains?
|
// did we run out of performing chains?
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -389,7 +472,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
|
|||||||
tailLoop:
|
tailLoop:
|
||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim
|
// trim
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
if chains[last].valid {
|
if chains[last].valid {
|
||||||
@ -409,7 +492,7 @@ tailLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// if gasPerf < 0 we have no more profitable chains
|
// if gasPerf < 0 we have no more profitable chains
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
|
||||||
break tailLoop
|
break tailLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -471,7 +554,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
|||||||
return chains[i].Before(chains[j])
|
return chains[i].Before(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
if len(chains) != 0 && chains[0].gasPerf < 0 {
|
if !allowNegativeChains(ts.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
|
||||||
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
|
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
|
||||||
return nil, gasLimit
|
return nil, gasLimit
|
||||||
}
|
}
|
||||||
@ -479,7 +562,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
|||||||
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
|
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
|
||||||
last := len(chains)
|
last := len(chains)
|
||||||
for i, chain := range chains {
|
for i, chain := range chains {
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -497,7 +580,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
|||||||
tailLoop:
|
tailLoop:
|
||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim, discarding negative performing messages
|
// trim, discarding negative performing messages
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(ts.Height()))
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
if chains[last].valid {
|
if chains[last].valid {
|
||||||
@ -517,7 +600,7 @@ tailLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// if gasPerf < 0 we have no more profitable chains
|
// if gasPerf < 0 we have no more profitable chains
|
||||||
if chain.gasPerf < 0 {
|
if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
|
||||||
break tailLoop
|
break tailLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -585,16 +668,18 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *big.Int {
|
func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt) *big.Int {
|
||||||
maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
|
maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
|
||||||
if types.BigCmp(maxPremium, msg.Message.GasPremium) < 0 {
|
|
||||||
|
if types.BigCmp(maxPremium, msg.Message.GasPremium) > 0 {
|
||||||
maxPremium = msg.Message.GasPremium
|
maxPremium = msg.Message.GasPremium
|
||||||
}
|
}
|
||||||
gasReward := abig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
|
|
||||||
|
gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
|
||||||
return gasReward.Int
|
return gasReward.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) getGasPerf(gasReward *big.Int, gasLimit int64) float64 {
|
func (*MessagePool) getGasPerf(gasReward *big.Int, gasLimit int64) float64 {
|
||||||
// gasPerf = gasReward * build.BlockGasLimit / gasLimit
|
// gasPerf = gasReward * build.BlockGasLimit / gasLimit
|
||||||
a := new(big.Rat).SetInt(new(big.Int).Mul(gasReward, bigBlockGasLimit))
|
a := new(big.Rat).SetInt(new(big.Int).Mul(gasReward, bigBlockGasLimit))
|
||||||
b := big.NewRat(1, gasLimit)
|
b := big.NewRat(1, gasLimit)
|
||||||
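Taken together, the rewritten getGasReward and getGasPerf above compute reward = min(GasFeeCap − baseFee, GasPremium) × GasLimit (which can go negative when the base fee exceeds the fee cap) and gasPerf = reward × BlockGasLimit / gasLimit. A self-contained sketch; blockGasLimit is an assumed stand-in for build.BlockGasLimit, and the numbers in main mirror one of the TestGasReward cases added further down:

package main

import (
	"fmt"
	"math/big"
)

// Stand-in for build.BlockGasLimit (assumed value, for illustration only).
const blockGasLimit = int64(10_000_000_000)

// gasReward mirrors getGasReward: min(feeCap-baseFee, premium) * gasLimit.
func gasReward(feeCap, premium, baseFee *big.Int, gasLimit int64) *big.Int {
	maxPremium := new(big.Int).Sub(feeCap, baseFee)
	if maxPremium.Cmp(premium) > 0 {
		maxPremium = premium
	}
	return new(big.Int).Mul(maxPremium, big.NewInt(gasLimit))
}

// gasPerf mirrors getGasPerf: reward * blockGasLimit / gasLimit, as a float64.
func gasPerf(reward *big.Int, gasLimit int64) float64 {
	a := new(big.Rat).SetInt(new(big.Int).Mul(reward, big.NewInt(blockGasLimit)))
	b := big.NewRat(1, gasLimit)
	f, _ := new(big.Rat).Mul(a, b).Float64()
	return f
}

func main() {
	r := gasReward(big.NewInt(250), big.NewInt(200), big.NewInt(210), 10) // (250-210)*10 = 400
	fmt.Println(r, gasPerf(r, 10))
}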
@ -672,7 +757,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
|
|||||||
balance = new(big.Int).Sub(balance, value)
|
balance = new(big.Int).Sub(balance, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
gasReward := mp.getGasReward(m, baseFee, ts)
|
gasReward := mp.getGasReward(m, baseFee)
|
||||||
rewards = append(rewards, gasReward)
|
rewards = append(rewards, gasReward)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -773,10 +858,10 @@ func (mc *msgChain) Before(other *msgChain) bool {
|
|||||||
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet) {
|
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, allowNegative bool) {
|
||||||
i := len(mc.msgs) - 1
|
i := len(mc.msgs) - 1
|
||||||
for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
|
for i >= 0 && (mc.gasLimit > gasLimit || (!allowNegative && mc.gasPerf < 0)) {
|
||||||
gasReward := mp.getGasReward(mc.msgs[i], baseFee, ts)
|
gasReward := mp.getGasReward(mc.msgs[i], baseFee)
|
||||||
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
|
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
|
||||||
mc.gasLimit -= mc.msgs[i].Message.GasLimit
|
mc.gasLimit -= mc.msgs[i].Message.GasLimit
|
||||||
if mc.gasLimit > 0 {
|
if mc.gasLimit > 0 {
|
||||||
@ -839,7 +924,16 @@ func (mc *msgChain) SetNullEffectivePerf() {
|
|||||||
|
|
||||||
func (mc *msgChain) BeforeEffective(other *msgChain) bool {
|
func (mc *msgChain) BeforeEffective(other *msgChain) bool {
|
||||||
// move merged chains to the front so we can discard them earlier
|
// move merged chains to the front so we can discard them earlier
|
||||||
return (mc.merged && !other.merged) || mc.effPerf > other.effPerf ||
|
return (mc.merged && !other.merged) ||
|
||||||
|
(mc.gasPerf >= 0 && other.gasPerf < 0) ||
|
||||||
|
mc.effPerf > other.effPerf ||
|
||||||
(mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) ||
|
(mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) ||
|
||||||
(mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
(mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func shuffleChains(lst []*msgChain) {
|
||||||
|
for i := range lst {
|
||||||
|
j := rand.Intn(i + 1)
|
||||||
|
lst[i], lst[j] = lst[j], lst[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
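The shuffleChains helper added above is a standard Fisher–Yates shuffle. For reference, the same uniform permutation can be produced with the standard library's rand.Shuffle; a sketch only, not part of the change:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	chains := []string{"a", "b", "c", "d"}
	// Equivalent to the hand-rolled loop in shuffleChains.
	rand.Shuffle(len(chains), func(i, j int) {
		chains[i], chains[j] = chains[j], chains[i]
	})
	fmt.Println(chains)
}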
|
@ -1,20 +1,26 @@
|
|||||||
package messagepool
|
package messagepool
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"compress/gzip"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
"github.com/filecoin-project/lotus/chain/wallet"
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
|
|
||||||
@ -369,6 +375,12 @@ func TestMessageChainSkipping(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBasicMessageSelection(t *testing.T) {
|
func TestBasicMessageSelection(t *testing.T) {
|
||||||
|
oldMaxNonceGap := MaxNonceGap
|
||||||
|
MaxNonceGap = 1000
|
||||||
|
defer func() {
|
||||||
|
MaxNonceGap = oldMaxNonceGap
|
||||||
|
}()
|
||||||
|
|
||||||
mp, tma := makeTestMpool()
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
// the actors
|
// the actors
|
||||||
@ -721,6 +733,102 @@ func TestPriorityMessageSelection2(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPriorityMessageSelection3(t *testing.T) {
|
||||||
|
t.Skip("reenable after removing allow negative")
|
||||||
|
|
||||||
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
block := tma.nextBlock()
|
||||||
|
ts := mock.TipSet(block)
|
||||||
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
|
||||||
|
mp.cfg.PriorityAddrs = []address.Address{a1}
|
||||||
|
|
||||||
|
tma.baseFee = types.NewInt(1000)
|
||||||
|
nMessages := 10
|
||||||
|
for i := 0; i < nMessages; i++ {
|
||||||
|
bias := (nMessages - i) / 3
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1000+i%3+bias))
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
// messages from a2 have negative performance
|
||||||
|
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, 100)
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test greedy selection
|
||||||
|
msgs, err := mp.SelectMessages(ts, 1.0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedMsgs := 10
|
||||||
|
if len(msgs) != expectedMsgs {
|
||||||
|
t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// all messages must be from a1
|
||||||
|
nextNonce := uint64(0)
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Message.From != a1 {
|
||||||
|
t.Fatal("expected messages from a1 before messages from a2")
|
||||||
|
}
|
||||||
|
if m.Message.Nonce != nextNonce {
|
||||||
|
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
|
||||||
|
}
|
||||||
|
nextNonce++
|
||||||
|
}
|
||||||
|
|
||||||
|
// test optimal selection
|
||||||
|
msgs, err = mp.SelectMessages(ts, 0.1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedMsgs = 10
|
||||||
|
if len(msgs) != expectedMsgs {
|
||||||
|
t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// all messages must be from a1
|
||||||
|
nextNonce = uint64(0)
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Message.From != a1 {
|
||||||
|
t.Fatal("expected messages from a1 before messages from a2")
|
||||||
|
}
|
||||||
|
if m.Message.Nonce != nextNonce {
|
||||||
|
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
|
||||||
|
}
|
||||||
|
nextNonce++
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func TestOptimalMessageSelection1(t *testing.T) {
|
func TestOptimalMessageSelection1(t *testing.T) {
|
||||||
// this test uses just a single actor sending messages with a low tq
|
// this test uses just a single actor sending messages with a low tq
|
||||||
// the chain dependent merging algorithm should pick messages from the actor
|
// the chain dependent merging algorithm should pick messages from the actor
|
||||||
@ -1055,17 +1163,17 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
|
|||||||
|
|
||||||
greedyReward := big.NewInt(0)
|
greedyReward := big.NewInt(0)
|
||||||
for _, m := range greedyMsgs {
|
for _, m := range greedyMsgs {
|
||||||
greedyReward.Add(greedyReward, mp.getGasReward(m, baseFee, ts))
|
greedyReward.Add(greedyReward, mp.getGasReward(m, baseFee))
|
||||||
}
|
}
|
||||||
|
|
||||||
optReward := big.NewInt(0)
|
optReward := big.NewInt(0)
|
||||||
for _, m := range optMsgs {
|
for _, m := range optMsgs {
|
||||||
optReward.Add(optReward, mp.getGasReward(m, baseFee, ts))
|
optReward.Add(optReward, mp.getGasReward(m, baseFee))
|
||||||
}
|
}
|
||||||
|
|
||||||
bestTqReward := big.NewInt(0)
|
bestTqReward := big.NewInt(0)
|
||||||
for _, m := range bestMsgs {
|
for _, m := range bestMsgs {
|
||||||
bestTqReward.Add(bestTqReward, mp.getGasReward(m, baseFee, ts))
|
bestTqReward.Add(bestTqReward, mp.getGasReward(m, baseFee))
|
||||||
}
|
}
|
||||||
|
|
||||||
totalBestTQReward += float64(bestTqReward.Uint64())
|
totalBestTQReward += float64(bestTqReward.Uint64())
|
||||||
@ -1146,3 +1254,209 @@ func TestCompetitiveMessageSelectionZipf(t *testing.T) {
|
|||||||
t.Logf("Average reward boost across all seeds: %f", rewardBoost)
|
t.Logf("Average reward boost across all seeds: %f", rewardBoost)
|
||||||
t.Logf("Average reward of best ticket across all seeds: %f", tqReward)
|
t.Logf("Average reward of best ticket across all seeds: %f", tqReward)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGasReward(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
Premium uint64
|
||||||
|
FeeCap uint64
|
||||||
|
BaseFee uint64
|
||||||
|
GasReward int64
|
||||||
|
}{
|
||||||
|
{Premium: 100, FeeCap: 200, BaseFee: 100, GasReward: 100},
|
||||||
|
{Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10},
|
||||||
|
{Premium: 200, FeeCap: 250, BaseFee: 210, GasReward: 40},
|
||||||
|
{Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750},
|
||||||
|
}
|
||||||
|
|
||||||
|
mp := new(MessagePool)
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
|
||||||
|
msg := &types.SignedMessage{
|
||||||
|
Message: types.Message{
|
||||||
|
GasLimit: 10,
|
||||||
|
GasFeeCap: types.NewInt(test.FeeCap),
|
||||||
|
GasPremium: types.NewInt(test.Premium),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rew := mp.getGasReward(msg, types.NewInt(test.BaseFee))
|
||||||
|
if rew.Cmp(big.NewInt(test.GasReward*10)) != 0 {
|
||||||
|
t.Errorf("bad reward: expected %d, got %s", test.GasReward*10, rew)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRealWorldSelection(t *testing.T) {
|
||||||
|
// load test-messages.json.gz and rewrite the messages so that
|
||||||
|
// 1) we map each real actor to a test actor so that we can sign the messages
|
||||||
|
// 2) adjust the nonces so that they start from 0
|
||||||
|
file, err := os.Open("test-messages.json.gz")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gzr, err := gzip.NewReader(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(gzr)
|
||||||
|
|
||||||
|
var msgs []*types.SignedMessage
|
||||||
|
baseNonces := make(map[address.Address]uint64)
|
||||||
|
|
||||||
|
readLoop:
|
||||||
|
for {
|
||||||
|
m := new(types.SignedMessage)
|
||||||
|
err := dec.Decode(m)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
msgs = append(msgs, m)
|
||||||
|
nonce, ok := baseNonces[m.Message.From]
|
||||||
|
if !ok || m.Message.Nonce < nonce {
|
||||||
|
baseNonces[m.Message.From] = m.Message.Nonce
|
||||||
|
}
|
||||||
|
|
||||||
|
case io.EOF:
|
||||||
|
break readLoop
|
||||||
|
|
||||||
|
default:
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actorMap := make(map[address.Address]address.Address)
|
||||||
|
actorWallets := make(map[address.Address]*wallet.Wallet)
|
||||||
|
|
||||||
|
for _, m := range msgs {
|
||||||
|
baseNonce := baseNonces[m.Message.From]
|
||||||
|
|
||||||
|
localActor, ok := actorMap[m.Message.From]
|
||||||
|
if !ok {
|
||||||
|
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a, err := w.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
actorMap[m.Message.From] = a
|
||||||
|
actorWallets[a] = w
|
||||||
|
localActor = a
|
||||||
|
}
|
||||||
|
|
||||||
|
w, ok := actorWallets[localActor]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("failed to lookup wallet for actor %s", localActor)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Message.From = localActor
|
||||||
|
m.Message.Nonce -= baseNonce
|
||||||
|
|
||||||
|
sig, err := w.Sign(context.TODO(), localActor, m.Message.Cid().Bytes())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Signature = *sig
|
||||||
|
}
|
||||||
|
|
||||||
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
|
block := tma.nextBlockWithHeight(build.UpgradeBreezeHeight + 10)
|
||||||
|
ts := mock.TipSet(block)
|
||||||
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
|
for _, a := range actorMap {
|
||||||
|
tma.setBalance(a, 1000000)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.baseFee = types.NewInt(800_000_000)
|
||||||
|
|
||||||
|
sort.Slice(msgs, func(i, j int) bool {
|
||||||
|
return msgs[i].Message.Nonce < msgs[j].Message.Nonce
|
||||||
|
})
|
||||||
|
|
||||||
|
// add the messages
|
||||||
|
for _, m := range msgs {
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do message selection and check block packing
|
||||||
|
minGasLimit := int64(0.9 * float64(build.BlockGasLimit))
|
||||||
|
|
||||||
|
// greedy first
|
||||||
|
selected, err := mp.SelectMessages(ts, 1.0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit := int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=1.0; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// high quality ticket
|
||||||
|
selected, err = mp.SelectMessages(ts, .8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mid quality ticket
|
||||||
|
selected, err = mp.SelectMessages(ts, .4)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// low quality ticket
|
||||||
|
selected, err = mp.SelectMessages(ts, .1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// very low quality ticket
|
||||||
|
selected, err = mp.SelectMessages(ts, .01)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
BIN chain/messagepool/test-messages.json.gz (new file)
Binary file not shown.
@ -4,7 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||||
|
@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
|
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
|
||||||
|
|
||||||
@ -209,7 +210,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var act types.Actor
|
var act types.Actor
|
||||||
if found, err := st.root.Get(adt.AddrKey(addr), &act); err != nil {
|
if found, err := st.root.Get(abi.AddrKey(addr), &act); err != nil {
|
||||||
return nil, xerrors.Errorf("hamt find failed: %w", err)
|
return nil, xerrors.Errorf("hamt find failed: %w", err)
|
||||||
} else if !found {
|
} else if !found {
|
||||||
return nil, types.ErrActorNotFound
|
return nil, types.ErrActorNotFound
|
||||||
@ -254,11 +255,11 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
|
|||||||
|
|
||||||
for addr, sto := range st.snaps.layers[0].actors {
|
for addr, sto := range st.snaps.layers[0].actors {
|
||||||
if sto.Delete {
|
if sto.Delete {
|
||||||
if err := st.root.Delete(adt.AddrKey(addr)); err != nil {
|
if err := st.root.Delete(abi.AddrKey(addr)); err != nil {
|
||||||
return cid.Undef, err
|
return cid.Undef, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if err := st.root.Put(adt.AddrKey(addr), &sto.Act); err != nil {
|
if err := st.root.Put(abi.AddrKey(addr), &sto.Act); err != nil {
|
||||||
return cid.Undef, err
|
return cid.Undef, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5,8 +5,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
@ -29,6 +29,7 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate
|
|||||||
Bstore: sm.cs.Blockstore(),
|
Bstore: sm.cs.Blockstore(),
|
||||||
Syscalls: sm.cs.VMSys(),
|
Syscalls: sm.cs.VMSys(),
|
||||||
CircSupplyCalc: sm.GetCirculatingSupply,
|
CircSupplyCalc: sm.GetCirculatingSupply,
|
||||||
|
NtwkVersion: sm.GetNtwkVersion,
|
||||||
BaseFee: types.NewInt(0),
|
BaseFee: types.NewInt(0),
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -130,6 +131,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
|
|||||||
Bstore: sm.cs.Blockstore(),
|
Bstore: sm.cs.Blockstore(),
|
||||||
Syscalls: sm.cs.VMSys(),
|
Syscalls: sm.cs.VMSys(),
|
||||||
CircSupplyCalc: sm.GetCirculatingSupply,
|
CircSupplyCalc: sm.GetCirculatingSupply,
|
||||||
|
NtwkVersion: sm.GetNtwkVersion,
|
||||||
BaseFee: ts.Blocks()[0].ParentBaseFee,
|
BaseFee: ts.Blocks()[0].ParentBaseFee,
|
||||||
}
|
}
|
||||||
vmi, err := vm.NewVM(vmopt)
|
vmi, err := vm.NewVM(vmopt)
|
||||||
|
@ -3,16 +3,28 @@ package stmgr
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/state"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree) error{}
|
var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree, *types.TipSet) error{
|
||||||
|
build.UpgradeBreezeHeight: UpgradeFaucetBurnRecovery,
|
||||||
|
}
|
||||||
|
|
||||||
func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch) (err error) {
|
func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch, ts *types.TipSet) (err error) {
|
||||||
f, ok := ForksAtHeight[height]
|
f, ok := ForksAtHeight[height]
|
||||||
if ok {
|
if ok {
|
||||||
err := f(ctx, sm, st)
|
err := f(ctx, sm, st, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -20,3 +32,291 @@ func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type forEachTree interface {
|
||||||
|
ForEach(func(address.Address, *types.Actor) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error {
|
||||||
|
fromAct, err := tree.GetActor(from)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fromAct.Balance = types.BigSub(fromAct.Balance, amt)
|
||||||
|
if fromAct.Balance.Sign() < 0 {
|
||||||
|
return xerrors.Errorf("(sanity) deducted more funds from target account than it had (%s, %s)", from, types.FIL(amt))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tree.SetActor(from, fromAct); err != nil {
|
||||||
|
return xerrors.Errorf("failed to persist from actor: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
toAct, err := tree.GetActor(to)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get 'to' actor for transfer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
toAct.Balance = types.BigAdd(toAct.Balance, amt)
|
||||||
|
|
||||||
|
if err := tree.SetActor(to, toAct); err != nil {
|
||||||
|
return xerrors.Errorf("failed to persist to actor: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types.StateTree, ts *types.TipSet) error {
|
||||||
|
// Some initial parameters
|
||||||
|
FundsForMiners := types.FromFil(1_000_000)
|
||||||
|
LookbackEpoch := abi.ChainEpoch(32000)
|
||||||
|
AccountCap := types.FromFil(0)
|
||||||
|
BaseMinerBalance := types.FromFil(20)
|
||||||
|
DesiredReimbursementBalance := types.FromFil(5_000_000)
|
||||||
|
|
||||||
|
isSystemAccount := func(addr address.Address) (bool, error) {
|
||||||
|
id, err := address.IDFromAddress(addr)
|
||||||
|
if err != nil {
|
||||||
|
return false, xerrors.Errorf("id address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id < 1000 {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount {
|
||||||
|
return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grab lookback state for account checks
|
||||||
|
lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get tipset at lookback height: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var lbtree *state.StateTree
|
||||||
|
if err = sm.WithStateTree(lbts.ParentState(), func(state *state.StateTree) error {
|
||||||
|
lbtree = state
|
||||||
|
return nil
|
||||||
|
}); err != nil {
|
||||||
|
return xerrors.Errorf("loading state tree failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ReserveAddress, err := address.NewFromString("t090")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to parse reserve address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fetree, ok := tree.(forEachTree)
|
||||||
|
if !ok {
|
||||||
|
return xerrors.Errorf("fork transition state tree doesnt support ForEach (%T)", tree)
|
||||||
|
}
|
||||||
|
|
||||||
|
type transfer struct {
|
||||||
|
From address.Address
|
||||||
|
To address.Address
|
||||||
|
Amt abi.TokenAmount
|
||||||
|
}
|
||||||
|
|
||||||
|
var transfers []transfer
|
||||||
|
|
||||||
|
// Take all excess funds away, put them into the reserve account
|
||||||
|
err = fetree.ForEach(func(addr address.Address, act *types.Actor) error {
|
||||||
|
switch act.Code {
|
||||||
|
case builtin.AccountActorCodeID, builtin.MultisigActorCodeID, builtin.PaymentChannelActorCodeID:
|
||||||
|
sysAcc, err := isSystemAccount(addr)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("checking system account: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sysAcc {
|
||||||
|
transfers = append(transfers, transfer{
|
||||||
|
From: addr,
|
||||||
|
To: ReserveAddress,
|
||||||
|
Amt: act.Balance,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
case builtin.StorageMinerActorCodeID:
|
||||||
|
var st miner.State
|
||||||
|
if err := sm.WithActorState(ctx, &st)(act); err != nil {
|
||||||
|
return xerrors.Errorf("failed to load miner state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var available abi.TokenAmount
|
||||||
|
{
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err)
|
||||||
|
}
|
||||||
|
available = abi.NewTokenAmount(0)
|
||||||
|
}()
|
||||||
|
// this panics if the miner doesnt have enough funds to cover their locked pledge
|
||||||
|
available = st.GetAvailableBalance(act.Balance)
|
||||||
|
}
|
||||||
|
|
||||||
|
transfers = append(transfers, transfer{
|
||||||
|
From: addr,
|
||||||
|
To: ReserveAddress,
|
||||||
|
Amt: available,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("foreach over state tree failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute transfers from previous step
|
||||||
|
for _, t := range transfers {
|
||||||
|
if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil {
|
||||||
|
return xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pull up power table to give miners back some funds proportional to their power
|
||||||
|
var ps power.State
|
||||||
|
powAct, err := tree.GetActor(builtin.StoragePowerActorAddr)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to load power actor: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
|
||||||
|
if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
|
||||||
|
return xerrors.Errorf("failed to get power actor state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
totalPower := ps.TotalBytesCommitted
|
||||||
|
|
||||||
|
var transfersBack []transfer
|
||||||
|
// Now, we return some funds to places where they are needed
|
||||||
|
err = fetree.ForEach(func(addr address.Address, act *types.Actor) error {
|
||||||
|
lbact, err := lbtree.GetActor(addr)
|
||||||
|
if err != nil {
|
||||||
|
if !xerrors.Is(err, types.ErrActorNotFound) {
|
||||||
|
return xerrors.Errorf("failed to get actor in lookback state")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
prevBalance := abi.NewTokenAmount(0)
|
||||||
|
if lbact != nil {
|
||||||
|
prevBalance = lbact.Balance
|
||||||
|
}
|
||||||
|
|
||||||
|
switch act.Code {
|
||||||
|
case builtin.AccountActorCodeID, builtin.MultisigActorCodeID, builtin.PaymentChannelActorCodeID:
|
||||||
|
nbalance := big.Min(prevBalance, AccountCap)
|
||||||
|
if nbalance.Sign() != 0 {
|
||||||
|
transfersBack = append(transfersBack, transfer{
|
||||||
|
From: ReserveAddress,
|
||||||
|
To: addr,
|
||||||
|
Amt: nbalance,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
case builtin.StorageMinerActorCodeID:
|
||||||
|
var st miner.State
|
||||||
|
if err := sm.WithActorState(ctx, &st)(act); err != nil {
|
||||||
|
return xerrors.Errorf("failed to load miner state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var minfo miner.MinerInfo
|
||||||
|
if err := cst.Get(ctx, st.Info, &minfo); err != nil {
|
||||||
|
return xerrors.Errorf("failed to get miner info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sectorsArr, err := adt.AsArray(sm.ChainStore().Store(ctx), st.Sectors)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to load sectors array: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slen := sectorsArr.Length()
|
||||||
|
|
||||||
|
power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize)))
|
||||||
|
|
||||||
|
mfunds := minerFundsAlloc(power, totalPower)
|
||||||
|
transfersBack = append(transfersBack, transfer{
|
||||||
|
From: ReserveAddress,
|
||||||
|
To: minfo.Worker,
|
||||||
|
Amt: mfunds,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Now make sure to give each miner who had power at the lookback some FIL
|
||||||
|
lbact, err := lbtree.GetActor(addr)
|
||||||
|
if err == nil {
|
||||||
|
var lbst miner.State
|
||||||
|
if err := sm.WithActorState(ctx, &lbst)(lbact); err != nil {
|
||||||
|
return xerrors.Errorf("failed to load miner state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lbsectors, err := adt.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to load lb sectors array: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if lbsectors.Length() > 0 {
|
||||||
|
transfersBack = append(transfersBack, transfer{
|
||||||
|
From: ReserveAddress,
|
||||||
|
To: minfo.Worker,
|
||||||
|
Amt: BaseMinerBalance,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
log.Warnf("failed to get miner in lookback state: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("foreach over state tree failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, t := range transfersBack {
|
||||||
|
if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil {
|
||||||
|
return xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// transfer all burnt funds back to the reserve account
|
||||||
|
burntAct, err := tree.GetActor(builtin.BurntFundsActorAddr)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to load burnt funds actor: %w", err)
|
||||||
|
}
|
||||||
|
if err := doTransfer(tree, builtin.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil {
|
||||||
|
return xerrors.Errorf("failed to unburn funds: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Top up the reimbursement service
|
||||||
|
reimbAddr, err := address.NewFromString("t0111")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to parse reimbursement service address")
|
||||||
|
}
|
||||||
|
|
||||||
|
reimb, err := tree.GetActor(reimbAddr)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to load reimbursement account actor: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
|
||||||
|
if err := doTransfer(tree, ReserveAddress, reimbAddr, difference); err != nil {
|
||||||
|
return xerrors.Errorf("failed to top up reimbursement account: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now, a final sanity check to make sure the balances all check out
|
||||||
|
total := abi.NewTokenAmount(0)
|
||||||
|
err = fetree.ForEach(func(addr address.Address, act *types.Actor) error {
|
||||||
|
total = types.BigAdd(total, act.Balance)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("checking final state balance failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
exp := types.FromFil(build.FilBase)
|
||||||
|
if !exp.Equals(total) {
|
||||||
|
return xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
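Editorial sketch, not part of the commit: the migration above works in passes, recording intended balance moves, applying them with doTransfer, and finally re-summing every actor balance against the total FIL supply. Pulled out on its own (same package and imports as the hunk above), that closing invariant check is just:

func checkTotalSupply(fetree forEachTree) error {
	total := abi.NewTokenAmount(0)
	if err := fetree.ForEach(func(addr address.Address, act *types.Actor) error {
		total = types.BigAdd(total, act.Balance) // accumulate every actor's balance
		return nil
	}); err != nil {
		return xerrors.Errorf("summing balances: %w", err)
	}
	if exp := types.FromFil(build.FilBase); !exp.Equals(total) {
		return xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
	}
	return nil
}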
@@ -7,15 +7,14 @@ import (
 	"testing"
 
 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
 	"github.com/filecoin-project/specs-actors/actors/runtime"
-	"github.com/filecoin-project/specs-actors/actors/util/adt"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/lotus/chain/actors"
@@ -73,18 +72,18 @@ func (ta *testActor) Exports() []interface{} {
 	}
 }
 
-func (ta *testActor) Constructor(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue {
+func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
 	rt.ValidateImmediateCallerAcceptAny()
-	rt.State().Create(&testActorState{11})
-	fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Message().Receiver())
+	rt.StateCreate(&testActorState{11})
+	fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Receiver())
 
-	return adt.Empty
+	return abi.Empty
 }
 
-func (ta *testActor) TestMethod(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue {
+func (ta *testActor) TestMethod(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
 	rt.ValidateImmediateCallerAcceptAny()
 	var st testActorState
-	rt.State().Readonly(&st)
+	rt.StateReadonly(&st)
 
 	if rt.CurrEpoch() > testForkHeight {
 		if st.HasUpgraded != 55 {
@@ -96,7 +95,7 @@ func (ta *testActor) TestMethod(rt runtime.Runtime, params *adt.EmptyValue) *adt
 		}
 	}
 
-	return adt.Empty
+	return abi.Empty
 }
 
 func TestForkHeightTriggers(t *testing.T) {
@@ -119,7 +118,7 @@ func TestForkHeightTriggers(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree) error {
+	stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree, ts *types.TipSet) error {
 		cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
 
 		act, err := st.GetActor(taddr)
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/filecoin-project/go-state-types/network"
+
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 
 	"github.com/filecoin-project/specs-actors/actors/builtin/multisig"
@@ -18,8 +20,8 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
 
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/market"
 	"github.com/filecoin-project/specs-actors/actors/builtin/reward"
@@ -145,7 +147,7 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
 
 type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
 
-func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount) (cid.Cid, cid.Cid, error) {
+func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
 
 	vmopt := &vm.VMOpts{
 		StateBase: pstate,
@@ -154,6 +156,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
 		Bstore:         sm.cs.Blockstore(),
 		Syscalls:       sm.cs.VMSys(),
 		CircSupplyCalc: sm.GetCirculatingSupply,
+		NtwkVersion:    sm.GetNtwkVersion,
 		BaseFee:        baseFee,
 	}
 
@@ -198,7 +201,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
 
 	for i := parentEpoch; i < epoch; i++ {
 		// handle state forks
-		err = sm.handleStateForks(ctx, vmi.StateTree(), i)
+		err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts)
 		if err != nil {
 			return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
 		}
@@ -230,8 +233,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
 		}
 
 		receipts = append(receipts, &r.MessageReceipt)
-		gasReward = big.Add(gasReward, r.MinerTip)
-		penalty = big.Add(penalty, r.Penalty)
+		gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
+		penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
 
 		if cb != nil {
 			if err := cb(cm.Cid(), m, r); err != nil {
@@ -347,7 +350,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
 
 	baseFee := blks[0].ParentBaseFee
 
-	return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee)
+	return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
 }
 
 func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
@@ -1120,3 +1123,19 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
 
 	return csi.FilCirculating, nil
 }
+
+func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version {
+	if build.UseNewestNetwork() {
+		return build.NewestNetworkVersion
+	}
+
+	if height <= build.UpgradeBreezeHeight {
+		return network.Version0
+	}
+
+	if height <= build.UpgradeSmokeHeight {
+		return network.Version1
+	}
+
+	return build.NewestNetworkVersion
+}
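Illustration only: once GetNtwkVersion is plumbed into VMOpts, callers can branch on the protocol version instead of comparing epochs against individual upgrade heights. A hedged sketch follows; the revision numbers and helper name are made up, only GetNtwkVersion and the network.Version constants come from this change.

func pricelistRevision(ctx context.Context, sm *StateManager, height abi.ChainEpoch) int {
	// network.Version values are ordered, so a simple comparison selects behaviour per upgrade.
	if sm.GetNtwkVersion(ctx, height) >= network.Version1 {
		return 1 // hypothetical post-Smoke revision
	}
	return 0 // hypothetical pre-Smoke revision
}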
@ -3,8 +3,14 @@ package stmgr
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
saruntime "github.com/filecoin-project/specs-actors/actors/runtime"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
|
||||||
|
|
||||||
cid "github.com/ipfs/go-cid"
|
cid "github.com/ipfs/go-cid"
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
@ -13,8 +19,9 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-bitfield"
|
"github.com/filecoin-project/go-bitfield"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/account"
|
"github.com/filecoin-project/specs-actors/actors/builtin/account"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/cron"
|
"github.com/filecoin-project/specs-actors/actors/builtin/cron"
|
||||||
@ -26,7 +33,6 @@ import (
|
|||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
@ -114,7 +120,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres
|
|||||||
}
|
}
|
||||||
|
|
||||||
var claim power.Claim
|
var claim power.Claim
|
||||||
if _, err := cm.Get(adt.AddrKey(maddr), &claim); err != nil {
|
if _, err := cm.Get(abi.AddrKey(maddr), &claim); err != nil {
|
||||||
return power.Claim{}, power.Claim{}, err
|
return power.Claim{}, power.Claim{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +169,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
|
|||||||
return sectorInfo, nil
|
return sectorInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
|
func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *bitfield.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
|
||||||
var mas miner.State
|
var mas miner.State
|
||||||
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
|
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -173,8 +179,8 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet,
|
|||||||
return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.Sectors, filter, filterOut)
|
return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.Sectors, filter, filterOut)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) {
|
func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]proof.SectorInfo, error) {
|
||||||
var partsProving []abi.BitField
|
var partsProving []bitfield.BitField
|
||||||
var mas *miner.State
|
var mas *miner.State
|
||||||
var info *miner.MinerInfo
|
var info *miner.MinerInfo
|
||||||
|
|
||||||
@ -261,7 +267,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
|
|||||||
return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
|
return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
out := make([]abi.SectorInfo, len(ids))
|
out := make([]proof.SectorInfo, len(ids))
|
||||||
for i, n := range ids {
|
for i, n := range ids {
|
||||||
sid := sectors[n]
|
sid := sectors[n]
|
||||||
|
|
||||||
@ -272,7 +278,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
|
|||||||
return nil, xerrors.Errorf("failed to find sector %d", sid)
|
return nil, xerrors.Errorf("failed to find sector %d", sid)
|
||||||
}
|
}
|
||||||
|
|
||||||
out[i] = abi.SectorInfo{
|
out[i] = proof.SectorInfo{
|
||||||
SealProof: spt,
|
SealProof: spt,
|
||||||
SectorNumber: sinfo.SectorNumber,
|
SectorNumber: sinfo.SectorNumber,
|
||||||
SealedCID: sinfo.SealedCID,
|
SealedCID: sinfo.SealedCID,
|
||||||
@ -306,7 +312,7 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ok, err := claims.Get(power.AddrKey(maddr), nil)
|
ok, err := claims.Get(abi.AddrKey(maddr), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -387,7 +393,7 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([
|
|||||||
return miners, nil
|
return miners, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadSectorsFromSet(ctx context.Context, bs blockstore.Blockstore, ssc cid.Cid, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
|
func LoadSectorsFromSet(ctx context.Context, bs blockstore.Blockstore, ssc cid.Cid, filter *bitfield.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
|
||||||
a, err := adt.AsArray(store.ActorStore(ctx, bs), ssc)
|
a, err := adt.AsArray(store.ActorStore(ctx, bs), ssc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -440,6 +446,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
|
|||||||
Bstore: sm.cs.Blockstore(),
|
Bstore: sm.cs.Blockstore(),
|
||||||
Syscalls: sm.cs.VMSys(),
|
Syscalls: sm.cs.VMSys(),
|
||||||
CircSupplyCalc: sm.GetCirculatingSupply,
|
CircSupplyCalc: sm.GetCirculatingSupply,
|
||||||
|
NtwkVersion: sm.GetNtwkVersion,
|
||||||
BaseFee: ts.Blocks()[0].ParentBaseFee,
|
BaseFee: ts.Blocks()[0].ParentBaseFee,
|
||||||
}
|
}
|
||||||
vmi, err := vm.NewVM(vmopt)
|
vmi, err := vm.NewVM(vmopt)
|
||||||
@ -449,7 +456,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
|
|||||||
|
|
||||||
for i := ts.Height(); i < height; i++ {
|
for i := ts.Height(); i < height; i++ {
|
||||||
// handle state forks
|
// handle state forks
|
||||||
err = sm.handleStateForks(ctx, vmi.StateTree(), i)
|
err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
|
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
|
||||||
}
|
}
|
||||||
@ -495,7 +502,7 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.
|
|||||||
return lbts, nil
|
return lbts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBeacon, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
|
func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
|
||||||
ts, err := sm.ChainStore().LoadTipSet(tsk)
|
ts, err := sm.ChainStore().LoadTipSet(tsk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err)
|
return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err)
|
||||||
@ -510,7 +517,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
|
|||||||
prev = &types.BeaconEntry{}
|
prev = &types.BeaconEntry{}
|
||||||
}
|
}
|
||||||
|
|
||||||
entries, err := beacon.BeaconEntriesForBlock(ctx, bcn, round, *prev)
|
entries, err := beacon.BeaconEntriesForBlock(ctx, bcs, round, ts.Height(), *prev)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -547,7 +554,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
|
|||||||
|
|
||||||
sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
|
sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("getting wpost proving set: %w", err)
|
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(sectors) == 0 {
|
if len(sectors) == 0 {
|
||||||
@@ -586,14 +593,14 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
 	}, nil
 }
 
-type methodMeta struct {
+type MethodMeta struct {
 	Name string
 
 	Params reflect.Type
 	Ret    reflect.Type
 }
 
-var MethodsMap = map[cid.Cid][]methodMeta{}
+var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
 
 func init() {
 	cidToMethods := map[cid.Cid][2]interface{}{
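Illustrative helper, not in the commit: with MethodsMap keyed by abi.MethodNum, resolving a method's name and a fresh params value mirrors what GetReturnType does further down. The helper name is invented; the map shape, MethodMeta fields and the cbg assertion are taken from this diff, assuming the stmgr package context.

func describeMethod(code cid.Cid, method abi.MethodNum) (string, cbg.CBORUnmarshaler, error) {
	m, found := MethodsMap[code][method]
	if !found {
		return "", nil, fmt.Errorf("unknown method %d for actor %s", method, code)
	}
	// Params holds a pointer type; allocate a fresh value ready for CBOR decoding.
	params := reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
	return m.Name, params, nil
}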
@ -611,25 +618,65 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for c, m := range cidToMethods {
|
for c, m := range cidToMethods {
|
||||||
rt := reflect.TypeOf(m[0])
|
exports := m[1].(saruntime.Invokee).Exports()
|
||||||
nf := rt.NumField()
|
methods := make(map[abi.MethodNum]MethodMeta, len(exports))
|
||||||
|
|
||||||
MethodsMap[c] = append(MethodsMap[c], methodMeta{
|
// Explicitly add send, it's special.
|
||||||
|
methods[builtin.MethodSend] = MethodMeta{
|
||||||
Name: "Send",
|
Name: "Send",
|
||||||
Params: reflect.TypeOf(new(adt.EmptyValue)),
|
Params: reflect.TypeOf(new(abi.EmptyValue)),
|
||||||
Ret: reflect.TypeOf(new(adt.EmptyValue)),
|
Ret: reflect.TypeOf(new(abi.EmptyValue)),
|
||||||
})
|
|
||||||
|
|
||||||
exports := m[1].(abi.Invokee).Exports()
|
|
||||||
for i := 0; i < nf; i++ {
|
|
||||||
export := reflect.TypeOf(exports[i+1])
|
|
||||||
|
|
||||||
MethodsMap[c] = append(MethodsMap[c], methodMeta{
|
|
||||||
Name: rt.Field(i).Name,
|
|
||||||
Params: export.In(1),
|
|
||||||
Ret: export.Out(0),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Learn method names from the builtin.Methods* structs.
|
||||||
|
rv := reflect.ValueOf(m[0])
|
||||||
|
rt := rv.Type()
|
||||||
|
nf := rt.NumField()
|
||||||
|
methodToName := make([]string, len(exports))
|
||||||
|
for i := 0; i < nf; i++ {
|
||||||
|
name := rt.Field(i).Name
|
||||||
|
number := rv.Field(i).Interface().(abi.MethodNum)
|
||||||
|
methodToName[number] = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterate over exported methods. Some of these _may_ be nil and
|
||||||
|
// must be skipped.
|
||||||
|
for number, export := range exports {
|
||||||
|
if export == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ev := reflect.ValueOf(export)
|
||||||
|
et := ev.Type()
|
||||||
|
|
||||||
|
// Make sure the method name is correct.
|
||||||
|
// This is just a nice sanity check.
|
||||||
|
fnName := runtime.FuncForPC(ev.Pointer()).Name()
|
||||||
|
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
|
||||||
|
mName := methodToName[number]
|
||||||
|
if mName != fnName {
|
||||||
|
panic(fmt.Sprintf(
|
||||||
|
"actor method name is %s but exported method name is %s",
|
||||||
|
fnName, mName,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
switch abi.MethodNum(number) {
|
||||||
|
case builtin.MethodSend:
|
||||||
|
panic("method 0 is reserved for Send")
|
||||||
|
case builtin.MethodConstructor:
|
||||||
|
if fnName != "Constructor" {
|
||||||
|
panic("method 1 is reserved for Constructor")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
methods[abi.MethodNum(number)] = MethodMeta{
|
||||||
|
Name: fnName,
|
||||||
|
Params: et.In(1),
|
||||||
|
Ret: et.Out(0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
MethodsMap[c] = methods
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -639,7 +686,10 @@ func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, me
|
|||||||
return nil, xerrors.Errorf("getting actor: %w", err)
|
return nil, xerrors.Errorf("getting actor: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
m := MethodsMap[act.Code][method]
|
m, found := MethodsMap[act.Code][method]
|
||||||
|
if !found {
|
||||||
|
return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
|
||||||
|
}
|
||||||
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
|
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3,22 +3,28 @@ package store
 import (
 	"context"
 
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"
 )
 
-func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int) types.BigInt {
-	// deta := 1/PackingEfficiency * gasLimitUsed/noOfBlocks - build.BlockGasTarget
-	// change := baseFee * deta / BlockGasTarget / BaseFeeMaxChangeDenom
+func ComputeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int, epoch abi.ChainEpoch) types.BigInt {
+	// deta := gasLimitUsed/noOfBlocks - build.BlockGasTarget
+	// change := baseFee * deta / BlockGasTarget
 	// nextBaseFee = baseFee + change
 	// nextBaseFee = max(nextBaseFee, build.MinimumBaseFee)
 
-	delta := build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum)
-	delta -= build.BlockGasTarget
+	var delta int64
+	if epoch > build.UpgradeSmokeHeight {
+		delta = gasLimitUsed / int64(noOfBlocks)
+		delta -= build.BlockGasTarget
+	} else {
+		delta = build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum)
+		delta -= build.BlockGasTarget
+	}
 
 	// cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta
 	if delta > build.BlockGasTarget {
@@ -40,6 +46,10 @@ func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int
 }
 
 func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) {
+	if ts.Height() > build.UpgradeBreezeHeight && ts.Height() < build.UpgradeBreezeHeight+build.BreezeGasTampingDuration {
+		return abi.NewTokenAmount(100), nil
+	}
+
 	zero := abi.NewTokenAmount(0)
 
 	// totalLimit is sum of GasLimits of unique messages in a tipset
@@ -69,5 +79,5 @@ func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi
 	}
 	parentBaseFee := ts.Blocks()[0].ParentBaseFee
 
-	return computeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks())), nil
+	return ComputeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks()), ts.Height()), nil
 }
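A hedged usage sketch, not from the commit: with a single block whose messages use exactly build.BlockGasTarget at an epoch past UpgradeSmokeHeight, the new delta is zero, so the parent base fee is returned unchanged. The starting value below is arbitrary and only assumes the store package context.

func ExampleComputeNextBaseFee() {
	parent := types.NewInt(100_000_000) // assumed parent base fee, in attoFIL
	next := ComputeNextBaseFee(parent, build.BlockGasTarget, 1, build.UpgradeSmokeHeight+1)
	fmt.Println(next.String()) // prints the same value as parent: delta == 0, so no change
}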
}
|
||||||
|
@ -27,7 +27,7 @@ func TestBaseFee(t *testing.T) {
|
|||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
test := test
|
test := test
|
||||||
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
|
||||||
output := computeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks)
|
output := ComputeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks, 0)
|
||||||
assert.Equal(t, fmt.Sprintf("%d", test.output), output.String())
|
assert.Equal(t, fmt.Sprintf("%d", test.output), output.String())
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -5,8 +5,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
@ -5,11 +5,11 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/chain/gen"
|
"github.com/filecoin-project/lotus/chain/gen"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
datastore "github.com/ipfs/go-datastore"
|
datastore "github.com/ipfs/go-datastore"
|
||||||
syncds "github.com/ipfs/go-datastore/sync"
|
syncds "github.com/ipfs/go-datastore/sync"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
@ -10,15 +10,14 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/minio/blake2b-simd"
|
"github.com/minio/blake2b-simd"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/chain/state"
|
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
"github.com/filecoin-project/lotus/journal"
|
"github.com/filecoin-project/lotus/journal"
|
||||||
bstore "github.com/filecoin-project/lotus/lib/blockstore"
|
bstore "github.com/filecoin-project/lotus/lib/blockstore"
|
||||||
@ -72,6 +71,20 @@ func init() {
|
|||||||
// ReorgNotifee represents a callback that gets called upon reorgs.
|
// ReorgNotifee represents a callback that gets called upon reorgs.
|
||||||
type ReorgNotifee func(rev, app []*types.TipSet) error
|
type ReorgNotifee func(rev, app []*types.TipSet) error
|
||||||
|
|
||||||
|
// Journal event types.
|
||||||
|
const (
|
||||||
|
evtTypeHeadChange = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
type HeadChangeEvt struct {
|
||||||
|
From types.TipSetKey
|
||||||
|
FromHeight abi.ChainEpoch
|
||||||
|
To types.TipSetKey
|
||||||
|
ToHeight abi.ChainEpoch
|
||||||
|
RevertCount int
|
||||||
|
ApplyCount int
|
||||||
|
}
|
||||||
|
|
||||||
// ChainStore is the main point of access to chain data.
|
// ChainStore is the main point of access to chain data.
|
||||||
//
|
//
|
||||||
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
|
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
|
||||||
@ -103,6 +116,8 @@ type ChainStore struct {
|
|||||||
tsCache *lru.ARCCache
|
tsCache *lru.ARCCache
|
||||||
|
|
||||||
vmcalls vm.SyscallBuilder
|
vmcalls vm.SyscallBuilder
|
||||||
|
|
||||||
|
evtTypes [1]journal.EventType
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
|
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
|
||||||
@ -118,6 +133,10 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB
|
|||||||
vmcalls: vmcalls,
|
vmcalls: vmcalls,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cs.evtTypes = [1]journal.EventType{
|
||||||
|
evtTypeHeadChange: journal.J.RegisterEventType("sync", "head_change"),
|
||||||
|
}
|
||||||
|
|
||||||
ci := NewChainIndex(cs.LoadTipSet)
|
ci := NewChainIndex(cs.LoadTipSet)
|
||||||
|
|
||||||
cs.cindex = ci
|
cs.cindex = ci
|
||||||
@ -344,12 +363,15 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
journal.Add("sync", map[string]interface{}{
|
journal.J.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
|
||||||
"op": "headChange",
|
return HeadChangeEvt{
|
||||||
"from": r.old.Key(),
|
From: r.old.Key(),
|
||||||
"to": r.new.Key(),
|
FromHeight: r.old.Height(),
|
||||||
"rev": len(revert),
|
To: r.new.Key(),
|
||||||
"apply": len(apply),
|
ToHeight: r.new.Height(),
|
||||||
|
RevertCount: len(revert),
|
||||||
|
ApplyCount: len(apply),
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// reverse the apply array
|
// reverse the apply array
|
||||||
@@ -471,7 +493,7 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) {
 
 	cur := b
 	for !a.Equals(cur) && cur.Height() > a.Height() {
-		next, err := cs.LoadTipSet(b.Parents())
+		next, err := cs.LoadTipSet(cur.Parents())
 		if err != nil {
 			return false, err
 		}
@@ -744,32 +766,16 @@ type BlockMessages struct {
 func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
 	applied := make(map[address.Address]uint64)
 
-	cst := cbor.NewCborStore(cs.bs)
-	st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
-	if err != nil {
-		return nil, xerrors.Errorf("failed to load state tree")
-	}
-
-	preloadAddr := func(a address.Address) error {
-		if _, ok := applied[a]; !ok {
-			act, err := st.GetActor(a)
-			if err != nil {
-				return err
-			}
-
-			applied[a] = act.Nonce
-		}
-		return nil
-	}
-
 	selectMsg := func(m *types.Message) (bool, error) {
-		if err := preloadAddr(m.From); err != nil {
-			return false, err
+		// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
+		if _, ok := applied[m.From]; !ok {
+			applied[m.From] = m.Nonce
 		}
 
 		if applied[m.From] != m.Nonce {
 			return false, nil
 		}
 
 		applied[m.From]++
 
 		return true, nil
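Editorial sketch, not the actual ChainStore code: the effect of the new rule is that the first message seen from a sender pins its expected nonce, so copies of the same message included by several blocks of one tipset are applied only once, without loading the parent state tree.

func dedupByNonce(msgs []*types.Message) []*types.Message {
	applied := make(map[address.Address]uint64)
	out := make([]*types.Message, 0, len(msgs))
	for _, m := range msgs {
		if _, ok := applied[m.From]; !ok {
			applied[m.From] = m.Nonce // first occurrence fixes the expected nonce
		}
		if applied[m.From] != m.Nonce {
			continue // duplicate or out-of-order nonce: skip, as selectMsg does
		}
		applied[m.From]++
		out = append(out, m)
	}
	return out
}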
@ -1159,7 +1165,7 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.
|
|||||||
return in, rerr
|
return in, rerr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, w io.Writer) error {
|
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
|
||||||
if ts == nil {
|
if ts == nil {
|
||||||
ts = cs.GetHeaviestTipSet()
|
ts = cs.GetHeaviestTipSet()
|
||||||
}
|
}
|
||||||
@ -1197,9 +1203,13 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
|
|||||||
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
|
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
|
var cids []cid.Cid
|
||||||
if err != nil {
|
if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
|
||||||
return xerrors.Errorf("recursing messages failed: %w", err)
|
mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("recursing messages failed: %w", err)
|
||||||
|
}
|
||||||
|
cids = mcids
|
||||||
}
|
}
|
||||||
|
|
||||||
if b.Height > 0 {
|
if b.Height > 0 {
|
||||||
|
@ -7,12 +7,12 @@ import (
|
|||||||
|
|
||||||
datastore "github.com/ipfs/go-datastore"
|
datastore "github.com/ipfs/go-datastore"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/gen"
|
"github.com/filecoin-project/lotus/chain/gen"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
@ -96,7 +96,7 @@ func TestChainExportImport(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
if err := cg.ChainStore().Export(context.TODO(), last, 0, buf); err != nil {
|
if err := cg.ChainStore().Export(context.TODO(), last, 0, false, buf); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,10 +4,10 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
|
big2 "github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/state"
|
"github.com/filecoin-project/lotus/chain/state"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
big2 "github.com/filecoin-project/specs-actors/actors/abi/big"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
|
@ -369,9 +369,8 @@ func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.Block
|
|||||||
func (bv *BlockValidator) isChainNearSynced() bool {
|
func (bv *BlockValidator) isChainNearSynced() bool {
|
||||||
ts := bv.chain.GetHeaviestTipSet()
|
ts := bv.chain.GetHeaviestTipSet()
|
||||||
timestamp := ts.MinTimestamp()
|
timestamp := ts.MinTimestamp()
|
||||||
now := build.Clock.Now().UnixNano()
|
timestampTime := time.Unix(int64(timestamp), 0)
|
||||||
cutoff := uint64(now) - uint64(6*time.Hour)
|
return build.Clock.Since(timestampTime) < 6*time.Hour
|
||||||
return timestamp > cutoff
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
|
func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
|
||||||
@ -555,6 +554,8 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
|||||||
fallthrough
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrTooManyPendingMessages):
|
case xerrors.Is(err, messagepool.ErrTooManyPendingMessages):
|
||||||
fallthrough
|
fallthrough
|
||||||
|
case xerrors.Is(err, messagepool.ErrNonceGap):
|
||||||
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
default:
|
default:
|
||||||
|
156
chain/sync.go
156
chain/sync.go
@ -9,8 +9,13 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
|
||||||
|
|
||||||
"github.com/Gurpartap/async"
|
"github.com/Gurpartap/async"
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
@ -25,18 +30,18 @@ import (
|
|||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
blst "github.com/supranational/blst/bindings/go"
|
blst "github.com/supranational/blst/bindings/go"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/beacon"
|
"github.com/filecoin-project/lotus/chain/beacon"
|
||||||
"github.com/filecoin-project/lotus/chain/blocksync"
|
"github.com/filecoin-project/lotus/chain/exchange"
|
||||||
"github.com/filecoin-project/lotus/chain/gen"
|
"github.com/filecoin-project/lotus/chain/gen"
|
||||||
"github.com/filecoin-project/lotus/chain/state"
|
"github.com/filecoin-project/lotus/chain/state"
|
||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
@ -50,10 +55,17 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Blocks that are more than MaxHeightDrift epochs above
|
// Blocks that are more than MaxHeightDrift epochs above
|
||||||
//the theoretical max height based on systime are quickly rejected
|
// the theoretical max height based on systime are quickly rejected
|
||||||
const MaxHeightDrift = 5
|
const MaxHeightDrift = 5
|
||||||
|
|
||||||
var defaultMessageFetchWindowSize = 200
|
var (
|
||||||
|
// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
|
||||||
|
// where the Syncer publishes candidate chain heads to be synced.
|
||||||
|
LocalIncoming = "incoming"
|
||||||
|
|
||||||
|
log = logging.Logger("chain")
|
||||||
|
defaultMessageFetchWindowSize = 200
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
|
if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
|
||||||
@ -66,10 +78,6 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var log = logging.Logger("chain")
|
|
||||||
|
|
||||||
var LocalIncoming = "incoming"
|
|
||||||
|
|
||||||
// Syncer is in charge of running the chain synchronization logic. As such, it
|
// Syncer is in charge of running the chain synchronization logic. As such, it
|
||||||
// is tasked with these functions, amongst others:
|
// is tasked with these functions, amongst others:
|
||||||
//
|
//
|
||||||
@ -87,7 +95,7 @@ var LocalIncoming = "incoming"
|
|||||||
// The Syncer does not run workers itself. It's mainly concerned with
|
// The Syncer does not run workers itself. It's mainly concerned with
|
||||||
// ensuring a consistent state of chain consensus. The reactive and network-
|
// ensuring a consistent state of chain consensus. The reactive and network-
|
||||||
// interfacing processes are part of other components, such as the SyncManager
|
// interfacing processes are part of other components, such as the SyncManager
|
||||||
// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
|
// (which owns the sync scheduler and sync workers), ChainExchange, the HELLO
|
||||||
// protocol, and the gossipsub block propagation layer.
|
// protocol, and the gossipsub block propagation layer.
|
||||||
//
|
//
|
||||||
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
|
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
|
||||||
@@ -98,7 +106,7 @@ type Syncer struct {
 store *store.ChainStore

 // handle to the random beacon for verification
-beacon beacon.RandomBeacon
+beacon beacon.Schedule

 // the state manager handles making state queries
 sm *stmgr.StateManager
@@ -110,11 +118,11 @@ type Syncer struct {
 bad *BadBlockCache

 // handle to the block sync service
-Bsync *blocksync.BlockSync
+Exchange exchange.Client

 self peer.ID

-syncmgr *SyncManager
+syncmgr SyncManager

 connmgr connmgr.ConnManager

@@ -125,10 +133,20 @@ type Syncer struct {
 verifier ffiwrapper.Verifier

 windowSize int

+tickerCtxCancel context.CancelFunc
+
+checkptLk sync.Mutex
+
+checkpt types.TipSetKey
+
+ds dtypes.MetadataDS
 }

+type SyncManagerCtor func(syncFn SyncFunc) SyncManager
+
 // NewSyncer creates a new Syncer object.
-func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
+func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
 gen, err := sm.ChainStore().GetGenesis()
 if err != nil {
 return nil, xerrors.Errorf("getting genesis block: %w", err)
@@ -139,11 +157,18 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
 return nil, err
 }

+cp, err := loadCheckpoint(ds)
+if err != nil {
+return nil, xerrors.Errorf("error loading mpool config: %w", err)
+}
+
 s := &Syncer{
+ds: ds,
+checkpt: cp,
 beacon: beacon,
 bad: NewBadBlockCache(),
 Genesis: gent,
-Bsync: bsync,
+Exchange: exchange,
 store: sm.ChainStore(),
 sm: sm,
 self: self,
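Note on the new checkpoint plumbing: the Syncer now takes a metadata datastore and restores the checkpointed tipset key at construction time, so a checkpoint set through the API survives node restarts. The following Go sketch shows how a tipset key could be round-tripped through a go-datastore key/value store; the key name and both helper names are illustrative assumptions, not the actual loadCheckpoint implementation from this commit.

// Sketch only: persisting a checkpoint TipSetKey in a go-datastore.
// The key name and function names below are assumptions for illustration;
// they are not taken from the Lotus source.
package checkpointsketch

import (
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/ipfs/go-datastore"
)

var checkpointKey = datastore.NewKey("/chain/checkpoint") // hypothetical key

func saveCheckpoint(ds datastore.Datastore, tsk types.TipSetKey) error {
	// TipSetKey serializes to the concatenated block CID bytes.
	return ds.Put(checkpointKey, tsk.Bytes())
}

func loadCheckpointSketch(ds datastore.Datastore) (types.TipSetKey, error) {
	buf, err := ds.Get(checkpointKey)
	if err == datastore.ErrNotFound {
		return types.EmptyTSK, nil // no checkpoint recorded yet
	}
	if err != nil {
		return types.EmptyTSK, err
	}
	return types.TipSetKeyFromBytes(buf)
}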
@@ -161,16 +186,40 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
 log.Warn("*********************************************************************************************")
 }

-s.syncmgr = NewSyncManager(s.Sync)
+s.syncmgr = syncMgrCtor(s.Sync)
 return s, nil
 }

 func (syncer *Syncer) Start() {
+tickerCtx, tickerCtxCancel := context.WithCancel(context.Background())
 syncer.syncmgr.Start()

+syncer.tickerCtxCancel = tickerCtxCancel
+
+go syncer.runMetricsTricker(tickerCtx)
+}
+
+func (syncer *Syncer) runMetricsTricker(tickerCtx context.Context) {
+genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0)
+ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second)
+defer ticker.Stop()
+
+for {
+select {
+case <-ticker.C:
+sinceGenesis := build.Clock.Now().Sub(genesisTime)
+expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
+
+stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight))
+case <-tickerCtx.Done():
+return
+}
+}
 }

 func (syncer *Syncer) Stop() {
 syncer.syncmgr.Stop()
+syncer.tickerCtxCancel()
 }

 // InformNewHead informs the syncer about a new potential tipset
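The metrics ticker added above reports how high the chain should be at the current wall-clock time, so the node's actual head height can be compared against it. The computation is simply elapsed seconds since genesis divided by the block delay. A self-contained sketch follows; the 30-second delay is only an example value, the real node reads it from its build parameters.

// Sketch: expected chain epoch from genesis time and block delay.
package main

import (
	"fmt"
	"time"
)

func expectedHeight(genesis, now time.Time, blockDelaySecs uint64) int64 {
	sinceGenesis := now.Sub(genesis)
	return int64(sinceGenesis.Seconds()) / int64(blockDelaySecs)
}

func main() {
	genesis := time.Now().Add(-90 * time.Second) // pretend genesis was 90s ago
	fmt.Println(expectedHeight(genesis, time.Now(), 30)) // 90s / 30s per epoch = 3
}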
@@ -220,7 +269,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
 return false
 }

-syncer.Bsync.AddPeer(from)
+syncer.Exchange.AddPeer(from)

 bestPweight := syncer.store.GetHeaviestTipSet().ParentWeight()
 targetWeight := fts.TipSet().ParentWeight()
@@ -451,7 +500,7 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e
 }

 // FetchTipSet tries to load the provided tipset from the store, and falls back
-// to the network (BlockSync) by querying the supplied peer if not found
+// to the network (client) by querying the supplied peer if not found
 // locally.
 //
 // {hint/usage} This is used from the HELLO protocol, to fetch the greeting
@@ -462,7 +511,7 @@ func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipS
 }

 // fall back to the network.
-return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
+return syncer.Exchange.GetFullTipSet(ctx, p, tsk)
 }

 // tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
@@ -604,7 +653,7 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b
 }

 var claim power.Claim
-exist, err := cm.Get(adt.AddrKey(maddr), &claim)
+exist, err := cm.Get(abi.AddrKey(maddr), &claim)
 if err != nil {
 return err
 }
@@ -835,7 +884,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
 return nil
 }

-if err := beacon.ValidateBlockValues(syncer.beacon, h, *prevBeacon); err != nil {
+if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
 return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
 }
 return nil
@@ -847,10 +896,12 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
 return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
 }

-beaconBase := *prevBeacon
-if len(h.BeaconEntries) == 0 {
+if h.Height > build.UpgradeSmokeHeight {
 buf.Write(baseTs.MinTicket().VRFProof)
-} else {
+}
+
+beaconBase := *prevBeacon
+if len(h.BeaconEntries) != 0 {
 beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
 }

@@ -940,7 +991,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block

 rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
 if err != nil {
-return xerrors.Errorf("failed to get randomness for verifying winningPost proof: %w", err)
+return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
 }

 mid, err := address.IDFromAddress(h.Miner)
@@ -953,7 +1004,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
 return xerrors.Errorf("getting winning post sector set: %w", err)
 }

-ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, abi.WinningPoStVerifyInfo{
+ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
 Randomness: rand,
 Proofs: h.WinPoStProof,
 ChallengedSectors: sectors,
@@ -1164,7 +1215,7 @@ func extractSyncState(ctx context.Context) *SyncerState {
 // total equality of the BeaconEntries in each block.
 // 3. Traverse the chain backwards, for each tipset:
 // 3a. Load it from the chainstore; if found, it move on to its parent.
-// 3b. Query our peers via BlockSync in batches, requesting up to a
+// 3b. Query our peers via client in batches, requesting up to a
 // maximum of 500 tipsets every time.
 //
 // Once we've concluded, if we find a mismatching tipset at the height where the
@@ -1265,7 +1316,7 @@ loop:
 if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
 window = gap
 }
-blks, err := syncer.Bsync.GetBlocks(ctx, at, window)
+blks, err := syncer.Exchange.GetBlocks(ctx, at, window)
 if err != nil {
 // Most likely our peers aren't fully synced yet, but forwarded
 // new block message (ideally we'd find better peers)
@@ -1283,7 +1334,7 @@ loop:
 // have. Since we fetch from the head backwards our reassembled chain
 // is sorted in reverse here: we have a child -> parent order, our last
 // tipset then should be child of the first tipset retrieved.
-// FIXME: The reassembly logic should be part of the `BlockSync`
+// FIXME: The reassembly logic should be part of the `client`
 // service, the consumer should not be concerned with the
 // `MaxRequestLength` limitation, it should just be able to request
 // an segment of arbitrary length. The same burden is put on
@@ -1333,7 +1384,7 @@ loop:
 log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
 fork, err := syncer.syncFork(ctx, base, known)
 if err != nil {
-if xerrors.Is(err, ErrForkTooLong) {
+if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) {
 // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
 log.Warn("adding forked chain to our bad tipset cache")
 for _, b := range incoming.Blocks() {
@@ -1349,15 +1400,24 @@ loop:
 }

 var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
+var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block")

 // syncFork tries to obtain the chain fragment that links a fork into a common
 // ancestor in our view of the chain.
 //
-// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
-// denylist. Else, we find the common ancestor, and add the missing chain
+// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
+// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
 // fragment until the fork point to the returned []TipSet.
 func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
-tips, err := syncer.Bsync.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
+chkpt := syncer.GetCheckpoint()
+if known.Key() == chkpt {
+return nil, ErrForkCheckpoint
+}
+
+// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
+// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
+tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
 if err != nil {
 return nil, err
 }
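Together with the per-tipset check in the next hunk, this makes the checkpoint a hard floor for reorgs: syncFork refuses to return any fork path that would abandon the checkpointed tipset, and the caller then adds the offending chain to the bad-block cache. A compact sketch of that decision rule over simplified types (plain strings instead of tipsets, purely illustrative):

// Sketch of the checkpoint guard applied while walking our chain back
// towards a possible common ancestor. Strings stand in for tipset keys.
package main

import (
	"errors"
	"fmt"
)

var errForkCheckpoint = errors.New("fork would require us to diverge from checkpointed block")
var errForkTooLong = errors.New("fork longer than threshold")

// forkAllowed walks the local chain from its head backwards and reports which
// tipsets we would abandon to adopt a fork rooted at forkPoint.
func forkAllowed(localChain []string, checkpoint, forkPoint string) ([]string, error) {
	var abandoned []string
	for i := len(localChain) - 1; i >= 0; i-- {
		ts := localChain[i]
		if ts == forkPoint {
			return abandoned, nil // reached the common ancestor; fork is acceptable
		}
		if ts == checkpoint {
			return nil, errForkCheckpoint // adopting the fork would drop the checkpoint
		}
		abandoned = append(abandoned, ts)
	}
	return nil, errForkTooLong
}

func main() {
	chain := []string{"genesis", "t1", "t2", "t3"}
	_, err := forkAllowed(chain, "t2", "t1") // checkpoint at t2, fork branches off t1
	fmt.Println(err)                         // fork would require us to diverge from checkpointed block
}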
@@ -1382,12 +1442,18 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
 if nts.Height() < tips[cur].Height() {
 cur++
 } else {
+// We will be forking away from nts, check that it isn't checkpointed
+if nts.Key() == chkpt {
+return nil, ErrForkCheckpoint
+}
+
 nts, err = syncer.store.LoadTipSet(nts.Parents())
 if err != nil {
 return nil, xerrors.Errorf("loading next local tipset: %w", err)
 }
 }
 }

 return nil, ErrForkTooLong
 }

@@ -1411,6 +1477,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*

 // fills out each of the given tipsets with messages and calls the callback with it
 func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
+ss := extractSyncState(ctx)
 ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
 defer span.End()

@@ -1438,12 +1505,13 @@ mainLoop:

 nextI := (i + 1) - batchSize // want to fetch batchSize values, 'i' points to last one we want to fetch, so its 'inclusive' of our request, thus we need to add one to our request start index

-var bstout []*blocksync.CompactedMessages
+ss.SetStage(api.StageFetchingMessages)
+var bstout []*exchange.CompactedMessages
 for len(bstout) < batchSize {
 next := headers[nextI]

 nreq := batchSize - len(bstout)
-bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq))
+bstips, err := syncer.Exchange.GetChainMessages(ctx, next, uint64(nreq))
 if err != nil {
 // TODO check errors for temporary nature
 if windowSize > 1 {
@@ -1457,6 +1525,7 @@ mainLoop:
 bstout = append(bstout, bstips...)
 nextI += len(bstips)
 }
+ss.SetStage(api.StageMessages)

 for bsi := 0; bsi < len(bstout); bsi++ {
 // temp storage so we don't persist data we dont want to
@@ -1488,8 +1557,8 @@ mainLoop:

 if i >= windowSize {
 newWindowSize := windowSize + 10
-if newWindowSize > int(blocksync.MaxRequestLength) {
-newWindowSize = int(blocksync.MaxRequestLength)
+if newWindowSize > int(exchange.MaxRequestLength) {
+newWindowSize = int(exchange.MaxRequestLength)
 }
 if newWindowSize > windowSize {
 windowSize = newWindowSize
@@ -1506,7 +1575,7 @@ mainLoop:
 return nil
 }

-func persistMessages(bs bstore.Blockstore, bst *blocksync.CompactedMessages) error {
+func persistMessages(bs bstore.Blockstore, bst *exchange.CompactedMessages) error {
 for _, m := range bst.Bls {
 //log.Infof("putting BLS message: %s", m.Cid())
 if _, err := store.PutMessage(bs, m); err != nil {
@@ -1601,11 +1670,7 @@ func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []b
 }

 func (syncer *Syncer) State() []SyncerState {
-var out []SyncerState
-for _, ss := range syncer.syncmgr.syncStates {
-out = append(out, ss.Snapshot())
-}
-return out
+return syncer.syncmgr.State()
 }

 // MarkBad manually adds a block to the "bad blocks" cache.
@@ -1613,6 +1678,11 @@ func (syncer *Syncer) MarkBad(blk cid.Cid) {
 syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad"))
 }

+// UnmarkBad manually adds a block to the "bad blocks" cache.
+func (syncer *Syncer) UnmarkBad(blk cid.Cid) {
+syncer.bad.Remove(blk)
+}
+
 func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
 bbr, ok := syncer.bad.Has(blk)
 return bbr.String(), ok
@@ -20,7 +20,28 @@ const (

 type SyncFunc func(context.Context, *types.TipSet) error

-type SyncManager struct {
+// SyncManager manages the chain synchronization process, both at bootstrap time
+// and during ongoing operation.
+//
+// It receives candidate chain heads in the form of tipsets from peers,
+// and schedules them onto sync workers, deduplicating processing for
+// already-active syncs.
+type SyncManager interface {
+// Start starts the SyncManager.
+Start()
+
+// Stop stops the SyncManager.
+Stop()
+
+// SetPeerHead informs the SyncManager that the supplied peer reported the
+// supplied tipset.
+SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet)
+
+// State retrieves the state of the sync workers.
+State() []SyncerState
+}
+
+type syncManager struct {
 lk sync.Mutex
 peerHeads map[peer.ID]*types.TipSet

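Because SyncManager is now an interface and the Syncer receives its constructor as a SyncManagerCtor, the scheduling policy can be swapped without touching the Syncer itself, for example in tests. Below is a minimal sketch of an alternative implementation that syncs every reported head inline; it is assumed to live in the same chain package so SyncFunc, SyncerState, peer.ID and types.TipSet resolve exactly as in the hunks above, and the type name is invented for illustration. Such a constructor could be handed to NewSyncer in place of NewSyncManager.

// Sketch: a trivial SyncManager that syncs each reported peer head inline,
// with no scheduler or worker pool. Assumed to live in the chain package;
// the type name is illustrative only.
type inlineSyncManager struct {
	doSync SyncFunc // the Syncer's Sync method, injected via SyncManagerCtor
	states []SyncerState
}

var _ SyncManager = (*inlineSyncManager)(nil)

func NewInlineSyncManager(sync SyncFunc) SyncManager {
	return &inlineSyncManager{doSync: sync}
}

func (m *inlineSyncManager) Start() {}
func (m *inlineSyncManager) Stop()  {}

func (m *inlineSyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
	// No bucketing or deduplication: just sync whatever head we were told about.
	if err := m.doSync(ctx, ts); err != nil {
		log.Warnf("inline sync of %s failed: %s", ts.Cids(), err)
	}
}

func (m *inlineSyncManager) State() []SyncerState { return m.states }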
@@ -48,6 +69,8 @@ type SyncManager struct {
 workerChan chan *types.TipSet
 }

+var _ SyncManager = (*syncManager)(nil)
+
 type syncResult struct {
 ts *types.TipSet
 success bool
@@ -55,8 +78,8 @@ type syncResult struct {

 const syncWorkerCount = 3

-func NewSyncManager(sync SyncFunc) *SyncManager {
-return &SyncManager{
+func NewSyncManager(sync SyncFunc) SyncManager {
+return &syncManager{
 bspThresh: 1,
 peerHeads: make(map[peer.ID]*types.TipSet),
 syncTargets: make(chan *types.TipSet),
@@ -69,18 +92,18 @@ func NewSyncManager(sync SyncFunc) *SyncManager {
 }
 }

-func (sm *SyncManager) Start() {
+func (sm *syncManager) Start() {
 go sm.syncScheduler()
 for i := 0; i < syncWorkerCount; i++ {
 go sm.syncWorker(i)
 }
 }

-func (sm *SyncManager) Stop() {
+func (sm *syncManager) Stop() {
 close(sm.stop)
 }

-func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
+func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
 sm.lk.Lock()
 defer sm.lk.Unlock()
 sm.peerHeads[p] = ts
@@ -105,6 +128,14 @@ func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.Tip
 sm.incomingTipSets <- ts
 }

+func (sm *syncManager) State() []SyncerState {
+ret := make([]SyncerState, 0, len(sm.syncStates))
+for _, s := range sm.syncStates {
+ret = append(ret, s.Snapshot())
+}
+return ret
+}
+
 type syncBucketSet struct {
 buckets []*syncTargetBucket
 }
@@ -234,7 +265,7 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
 return best
 }

-func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
+func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
 var buckets syncBucketSet

 var peerHeads []*types.TipSet
@@ -258,7 +289,7 @@ func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
 return buckets.Heaviest(), nil
 }

-func (sm *SyncManager) syncScheduler() {
+func (sm *syncManager) syncScheduler() {

 for {
 select {
@@ -280,7 +311,7 @@ func (sm *SyncManager) syncScheduler() {
 }
 }

-func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
+func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
 log.Debug("scheduling incoming tipset sync: ", ts.Cids())
 if sm.getBootstrapState() == BSStateSelected {
 sm.setBootstrapState(BSStateScheduled)
@@ -328,10 +359,11 @@ func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
 }
 }

-func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
+func (sm *syncManager) scheduleProcessResult(res *syncResult) {
 if res.success && sm.getBootstrapState() != BSStateComplete {
 sm.setBootstrapState(BSStateComplete)
 }

 delete(sm.activeSyncs, res.ts.Key())
 relbucket := sm.activeSyncTips.PopRelated(res.ts)
 if relbucket != nil {
@@ -360,7 +392,7 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
 }
 }

-func (sm *SyncManager) scheduleWorkSent() {
+func (sm *syncManager) scheduleWorkSent() {
 hts := sm.nextSyncTarget.heaviestTipSet()
 sm.activeSyncs[hts.Key()] = hts

@@ -372,7 +404,7 @@ func (sm *SyncManager) scheduleWorkSent() {
 }
 }

-func (sm *SyncManager) syncWorker(id int) {
+func (sm *syncManager) syncWorker(id int) {
 ss := &SyncerState{}
 sm.syncStates[id] = ss
 for {
@@ -397,7 +429,7 @@ func (sm *SyncManager) syncWorker(id int) {
 }
 }

-func (sm *SyncManager) syncedPeerCount() int {
+func (sm *syncManager) syncedPeerCount() int {
 var count int
 for _, ts := range sm.peerHeads {
 if ts.Height() > 0 {
@@ -407,19 +439,19 @@ func (sm *SyncManager) syncedPeerCount() int {
 return count
 }

-func (sm *SyncManager) getBootstrapState() int {
+func (sm *syncManager) getBootstrapState() int {
 sm.bssLk.Lock()
 defer sm.bssLk.Unlock()
 return sm.bootstrapState
 }

-func (sm *SyncManager) setBootstrapState(v int) {
+func (sm *syncManager) setBootstrapState(v int) {
 sm.bssLk.Lock()
 defer sm.bssLk.Unlock()
 sm.bootstrapState = v
 }

-func (sm *SyncManager) IsBootstrapped() bool {
+func (sm *syncManager) IsBootstrapped() bool {
 sm.bssLk.Lock()
 defer sm.bssLk.Unlock()
 return sm.bootstrapState == BSStateComplete
@@ -17,7 +17,7 @@ type syncOp struct {
 done func()
 }

-func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) {
+func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *syncManager, chan *syncOp)) {
 syncTargets := make(chan *syncOp)
 sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
 ch := make(chan struct{})
@@ -27,7 +27,7 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
 }
 <-ch
 return nil
-})
+}).(*syncManager)
 sm.bspThresh = thresh

 sm.Start()
@@ -77,12 +77,12 @@ func TestSyncManager(t *testing.T) {
 c3 := mock.TipSet(mock.MkBlock(b, 3, 5))
 d := mock.TipSet(mock.MkBlock(c1, 4, 5))

-runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 sm.SetPeerHead(ctx, "peer1", c1)
 assertGetSyncOp(t, stc, c1)
 })

-runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 sm.SetPeerHead(ctx, "peer1", c1)
 assertNoOp(t, stc)

@@ -90,7 +90,7 @@ func TestSyncManager(t *testing.T) {
 assertGetSyncOp(t, stc, c1)
 })

-runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 sm.SetPeerHead(ctx, "peer1", b)
 assertGetSyncOp(t, stc, b)

@@ -101,7 +101,7 @@ func TestSyncManager(t *testing.T) {
 assertGetSyncOp(t, stc, c2)
 })

-runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 sm.SetPeerHead(ctx, "peer1", a)
 assertGetSyncOp(t, stc, a)

@@ -122,7 +122,7 @@ func TestSyncManager(t *testing.T) {
 assertGetSyncOp(t, stc, d)
 })

-runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 sm.SetPeerHead(ctx, "peer1", a)
 assertGetSyncOp(t, stc, a)

@@ -7,6 +7,8 @@ import (
 "testing"
 "time"

+"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
 "github.com/ipfs/go-cid"

 ds "github.com/ipfs/go-datastore"
@@ -16,8 +18,8 @@ import (
 "github.com/stretchr/testify/require"

 "github.com/filecoin-project/go-address"
-"github.com/filecoin-project/specs-actors/actors/abi"
-"github.com/filecoin-project/specs-actors/actors/abi/big"
+"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/go-state-types/big"
 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
 "github.com/filecoin-project/specs-actors/actors/builtin/power"
 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
@@ -331,6 +333,36 @@ func (tu *syncTestUtil) compareSourceState(with int) {
 }
 }

+func (tu *syncTestUtil) assertBad(node int, ts *types.TipSet) {
+for _, blk := range ts.Cids() {
+rsn, err := tu.nds[node].SyncCheckBad(context.TODO(), blk)
+require.NoError(tu.t, err)
+require.True(tu.t, len(rsn) != 0)
+}
+}
+
+func (tu *syncTestUtil) getHead(node int) *types.TipSet {
+ts, err := tu.nds[node].ChainHead(context.TODO())
+require.NoError(tu.t, err)
+return ts
+}
+
+func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) {
+require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk))
+}
+
+func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) {
+for {
+_, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk)
+if err != nil {
+break
+}
+}
+
+// Time to allow for syncing and validation
+time.Sleep(2 * time.Second)
+}
+
 func (tu *syncTestUtil) waitUntilSync(from, to int) {
 target, err := tu.nds[from].ChainHead(tu.ctx)
 if err != nil {
@@ -442,8 +474,8 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
 return []uint64{1}, nil
 }

-func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
-return []abi.PoStProof{
+func (wpp badWpp) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
+return []proof.PoStProof{
 {
 PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
 ProofBytes: []byte("evil"),
@@ -630,6 +662,49 @@ func TestDuplicateNonce(t *testing.T) {
 require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message")
 }

+// This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
+// be applied on the parent state.
+func TestBadNonce(t *testing.T) {
+H := 10
+tu := prepSyncTest(t, H)
+
+base := tu.g.CurTipset
+
+// Produce a message from the banker with a bad nonce
+makeBadMsg := func() *types.SignedMessage {
+
+ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
+require.NoError(t, err)
+msg := types.Message{
+To: tu.g.Banker(),
+From: tu.g.Banker(),
+
+Nonce: ba.Nonce + 5,
+
+Value: types.NewInt(1),
+
+Method: 0,
+
+GasLimit: 100_000_000,
+GasFeeCap: types.NewInt(0),
+GasPremium: types.NewInt(0),
+}
+
+sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes())
+require.NoError(t, err)
+
+return &types.SignedMessage{
+Message: msg,
+Signature: *sig,
+}
+}
+
+msgs := make([][]*types.SignedMessage, 1)
+msgs[0] = []*types.SignedMessage{makeBadMsg()}
+
+tu.mineOnBlock(base, 0, []int{0}, true, true, msgs)
+}
+
 func BenchmarkSyncBasic(b *testing.B) {
 for i := 0; i < b.N; i++ {
 runSyncBenchLength(b, 100)
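The rule TestBadNonce exercises is that message nonces must be used strictly in sequence: a message is only applicable when its nonce equals the sending actor's current nonce, so the ba.Nonce + 5 gap above makes the block unsyncable even though the message is otherwise well-formed. A tiny standalone illustration of that check (simplified; the real enforcement happens when messages are applied to the parent state):

// Sketch: the sequential-nonce rule in isolation. The real enforcement lives
// in the state machine when messages are applied on top of the parent state.
package main

import "fmt"

func canApply(actorNonce, msgNonce uint64) bool {
	return msgNonce == actorNonce // nonces must be consumed strictly in order
}

func main() {
	fmt.Println(canApply(7, 7))  // true: exactly the next expected nonce
	fmt.Println(canApply(7, 12)) // false: a gap of 5, as constructed in TestBadNonce
}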
@@ -676,3 +751,87 @@ func TestSyncInputs(t *testing.T) {
 t.Fatal("should error on block with nil election proof")
 }
 }
+
+func TestSyncCheckpointHead(t *testing.T) {
+H := 10
+tu := prepSyncTest(t, H)
+
+p1 := tu.addClientNode()
+p2 := tu.addClientNode()
+
+fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+tu.loadChainToNode(p1)
+tu.loadChainToNode(p2)
+
+base := tu.g.CurTipset
+fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+// The two nodes fork at this point into 'a' and 'b'
+a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
+a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
+a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+
+tu.waitUntilSyncTarget(p1, a.TipSet())
+tu.checkpointTs(p1, a.TipSet().Key())
+
+require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
+// chain B will now be heaviest
+b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+
+fmt.Println("A: ", a.Cids(), a.TipSet().Height())
+fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+
+// Now for the fun part!! p1 should mark p2's head as BAD.
+
+require.NoError(t, tu.mn.LinkAll())
+tu.connect(p1, p2)
+tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
+p1Head := tu.getHead(p1)
+require.Equal(tu.t, p1Head, a.TipSet())
+tu.assertBad(p1, b.TipSet())
+}
+
+func TestSyncCheckpointEarlierThanHead(t *testing.T) {
+H := 10
+tu := prepSyncTest(t, H)
+
+p1 := tu.addClientNode()
+p2 := tu.addClientNode()
+
+fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+tu.loadChainToNode(p1)
+tu.loadChainToNode(p2)
+
+base := tu.g.CurTipset
+fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+// The two nodes fork at this point into 'a' and 'b'
+a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
+a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
+a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+
+tu.waitUntilSyncTarget(p1, a.TipSet())
+tu.checkpointTs(p1, a1.TipSet().Key())
+
+require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
+// chain B will now be heaviest
+b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+
+fmt.Println("A: ", a.Cids(), a.TipSet().Height())
+fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+
+// Now for the fun part!! p1 should mark p2's head as BAD.
+
+require.NoError(t, tu.mn.LinkAll())
+tu.connect(p1, p2)
+tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
+p1Head := tu.getHead(p1)
+require.Equal(tu.t, p1Head, a.TipSet())
+tu.assertBad(p1, b.TipSet())
+}
@@ -1,34 +1,16 @@
 package chain

 import (
-"fmt"
 "sync"
 "time"

-"github.com/filecoin-project/specs-actors/actors/abi"
+"github.com/filecoin-project/go-state-types/abi"

 "github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/types"
 )

-func SyncStageString(v api.SyncStateStage) string {
-switch v {
-case api.StageHeaders:
-return "header sync"
-case api.StagePersistHeaders:
-return "persisting headers"
-case api.StageMessages:
-return "message sync"
-case api.StageSyncComplete:
-return "complete"
-case api.StageSyncErrored:
-return "error"
-default:
-return fmt.Sprintf("<unknown: %d>", v)
-}
-}

 type SyncerState struct {
 lk sync.Mutex
 Target *types.TipSet
@@ -4,7 +4,7 @@ import (
 "fmt"
 "math/big"

-big2 "github.com/filecoin-project/specs-actors/actors/abi/big"
+big2 "github.com/filecoin-project/go-state-types/big"

 "github.com/filecoin-project/lotus/build"
 )
@@ -4,10 +4,12 @@ import (
 "bytes"
 "math/big"

+"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
 "github.com/minio/blake2b-simd"

-"github.com/filecoin-project/specs-actors/actors/abi"
-"github.com/filecoin-project/specs-actors/actors/crypto"
+"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/go-state-types/crypto"

 block "github.com/ipfs/go-block-format"
 "github.com/ipfs/go-cid"
@@ -53,7 +55,7 @@ type BlockHeader struct {

 BeaconEntries []BeaconEntry // 3

-WinPoStProof []abi.PoStProof // 4
+WinPoStProof []proof.PoStProof // 4

 Parents []cid.Cid // 5

@@ -7,12 +7,14 @@ import (
 "reflect"
 "testing"

+"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
 cid "github.com/ipfs/go-cid"
 "github.com/stretchr/testify/require"

 "github.com/filecoin-project/go-address"
-"github.com/filecoin-project/specs-actors/actors/abi"
-"github.com/filecoin-project/specs-actors/actors/crypto"
+"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/go-state-types/crypto"
 )

 func testBlockHeader(t testing.TB) *BlockHeader {
@@ -80,7 +82,7 @@ func TestInteropBH(t *testing.T) {
 t.Fatal(err)
 }

-posts := []abi.PoStProof{
+posts := []proof.PoStProof{
 {PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte{0x07}},
 }

@@ -6,9 +6,10 @@ import (
 "fmt"
 "io"

-abi "github.com/filecoin-project/specs-actors/actors/abi"
-crypto "github.com/filecoin-project/specs-actors/actors/crypto"
-exitcode "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+abi "github.com/filecoin-project/go-state-types/abi"
+crypto "github.com/filecoin-project/go-state-types/crypto"
+exitcode "github.com/filecoin-project/go-state-types/exitcode"
+proof "github.com/filecoin-project/specs-actors/actors/runtime/proof"
 cid "github.com/ipfs/go-cid"
 cbg "github.com/whyrusleeping/cbor-gen"
 xerrors "golang.org/x/xerrors"
@@ -58,7 +59,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error {
 }
 }

-// t.WinPoStProof ([]abi.PoStProof) (slice)
+// t.WinPoStProof ([]proof.PoStProof) (slice)
 if len(t.WinPoStProof) > cbg.MaxLength {
 return xerrors.Errorf("Slice value in field t.WinPoStProof was too long")
 }
@@ -243,7 +244,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
 t.BeaconEntries[i] = v
 }

-// t.WinPoStProof ([]abi.PoStProof) (slice)
+// t.WinPoStProof ([]proof.PoStProof) (slice)

 maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
 if err != nil {
@@ -259,12 +260,12 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
 }

 if extra > 0 {
-t.WinPoStProof = make([]abi.PoStProof, extra)
+t.WinPoStProof = make([]proof.PoStProof, extra)
 }

 for i := 0; i < int(extra); i++ {

-var v abi.PoStProof
+var v proof.PoStProof
 if err := v.UnmarshalCBOR(br); err != nil {
 return err
 }