solve merge conflicts (commit 7d4bc90d42)
@@ -5,7 +5,7 @@ orbs:
 executors:
   golang:
     docker:
-      - image: circleci/golang:1.14.6
+      - image: circleci/golang:1.15.5
     resource_class: 2xlarge
   ubuntu:
     docker:
@@ -276,7 +276,7 @@ jobs:
       - run: cd extern/filecoin-ffi && make
       - run:
          name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
-         command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
+         command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../blst
      - run:
          name: "build lotus-soup testplan"
          command: pushd extern/oni/lotus-soup && go build -tags=testground .
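Note on the change above: `go mod edit -replace` rewrites lotus-soup's go.mod rather than any source imports. Under the updated command, the module file would end up with a replace block along these lines (a sketch; the exact go.mod contents are not part of this commit):

    replace (
        github.com/filecoin-project/lotus => ../../../
        github.com/filecoin-project/filecoin-ffi => ../../filecoin-ffi
        github.com/supranational/blst => ../../blst
    )

The dropped fourth -replace mirrors the .gitmodules change further down: the fil-blst submodule is removed in favor of vendoring upstream blst directly.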
@@ -294,8 +294,8 @@ jobs:
       - run:
          name: Install go
          command: |
-           curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \
-           sudo installer -pkg go1.14.2.darwin-amd64.pkg -target /
+           curl -O https://dl.google.com/go/go1.15.5.darwin-amd64.pkg && \
+           sudo installer -pkg go1.15.5.darwin-amd64.pkg -target /
      - run:
          name: Install pkg-config
          command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
@@ -359,6 +359,7 @@ jobs:
     steps:
       - install-deps
       - prepare
       - run: make deps
       - run: make docsgen
       - run: git --no-pager diff
+      - run: git --no-pager diff --quiet
.gitmodules (vendored): 11 lines changed
@@ -1,16 +1,15 @@
 [submodule "extern/filecoin-ffi"]
 	path = extern/filecoin-ffi
 	url = https://github.com/filecoin-project/filecoin-ffi.git
 	branch = master
 [submodule "extern/serialization-vectors"]
 	path = extern/serialization-vectors
-	url = https://github.com/filecoin-project/serialization-vectors
+	url = https://github.com/filecoin-project/serialization-vectors.git
 [submodule "extern/test-vectors"]
 	path = extern/test-vectors
 	url = https://github.com/filecoin-project/test-vectors.git
-[submodule "extern/fil-blst"]
-	path = extern/fil-blst
-	url = https://github.com/filecoin-project/fil-blst.git
 [submodule "extern/oni"]
 	path = extern/oni
-	url = https://github.com/filecoin-project/oni
+	url = https://github.com/filecoin-project/oni.git
+[submodule "extern/blst"]
+	path = extern/blst
+	url = https://github.com/supranational/blst.git
CHANGELOG.md: 166 lines changed
@@ -1,5 +1,171 @@
# Lotus changelog

# 1.2.0 / 2020-11-18

This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15.

The changes that break consensus are:

- Upgrading to specs-actors 2.3.2 (https://github.com/filecoin-project/specs-actors/releases/tag/v2.3.2)
- Introducing proofs v5.4.0 (https://github.com/filecoin-project/rust-fil-proofs/releases/tag/storage-proofs-v5.4.0), and switching between the proof types (https://github.com/filecoin-project/lotus/pull/4873)
- Don't use terminated sectors for winning PoSt (https://github.com/filecoin-project/lotus/pull/4770)
- Various small VM-level edge-case handling (https://github.com/filecoin-project/lotus/pull/4783)
- Correction of the VM circulating supply calculation (https://github.com/filecoin-project/lotus/pull/4862)
- Retuning gas costs (https://github.com/filecoin-project/lotus/pull/4830)
- Avoid sending messages to the zero BLS address (https://github.com/filecoin-project/lotus/pull/4888)
## Other Changes

- delayed pubsub subscribe for messages topic (https://github.com/filecoin-project/lotus/pull/3646)
- add chain base64 decode params (https://github.com/filecoin-project/lotus/pull/4748)
- chore(dep): update bitswap to fix an initialization race that could panic (https://github.com/filecoin-project/lotus/pull/4855)
- Chore/blockstore nits (https://github.com/filecoin-project/lotus/pull/4813)
- Print Consensus Faults in miner info (https://github.com/filecoin-project/lotus/pull/4853)
- Truncate genesis file before generating (https://github.com/filecoin-project/lotus/pull/4851)
- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
- Fix init actor address map diffing (https://github.com/filecoin-project/lotus/pull/4875)
- Bump API versions to 1.0.0 (https://github.com/filecoin-project/lotus/pull/4884)
- Fix cid recording issue (https://github.com/filecoin-project/lotus/pull/4874)
- Speed up worker key retrieval (https://github.com/filecoin-project/lotus/pull/4885)
- Add error codes to worker return (https://github.com/filecoin-project/lotus/pull/4890)
- Update go to 1.15.5 (https://github.com/filecoin-project/lotus/pull/4896)
- Fix MaxSealingSectorsForDeals getting reset to 0 (https://github.com/filecoin-project/lotus/pull/4879)
- add sanity check for maximum block size (https://github.com/filecoin-project/lotus/pull/3171)
- Check (pre)commit receipt before other checks in failed states (https://github.com/filecoin-project/lotus/pull/4712)
- fix badger double open on daemon --import-snapshot; chainstore lifecycle (https://github.com/filecoin-project/lotus/pull/4872)
- Update to ipfs-blockstore 1.0.3 (https://github.com/filecoin-project/lotus/pull/4897)
- break loop when found warm up sector (https://github.com/filecoin-project/lotus/pull/4869)
- Tweak handling of bad beneficiaries in DeleteActor (https://github.com/filecoin-project/lotus/pull/4903)
- cap maximum number of messages per block in selection (https://github.com/filecoin-project/lotus/pull/4905)
- Set Calico epoch (https://github.com/filecoin-project/lotus/pull/4889)
# 1.1.3 / 2020-11-13

This is an optional release of Lotus that upgrades Lotus dependencies, and includes many performance enhancements, bugfixes, and UX improvements.

## Highlights

- Refactored much of the miner code (https://github.com/filecoin-project/lotus/pull/3618), improving its recovery from restarts and overall sector success rate
- Updated [proofs](https://github.com/filecoin-project/rust-fil-proofs) to v5.3.0, which brings significant performance improvements
- Updated [markets](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.0.4) to v1.0.4, which reduces failures due to reorgs (https://github.com/filecoin-project/lotus/pull/4730) and uses the newly refactored fund manager (https://github.com/filecoin-project/lotus/pull/4736)

## Changes

#### Core Lotus
- polish: add Equals method to MinerInfo shim (https://github.com/filecoin-project/lotus/pull/4604)
- Fix messagepool accounting (https://github.com/filecoin-project/lotus/pull/4668)
- Prep for gas balancing (https://github.com/filecoin-project/lotus/pull/4651)
- Reduce badger ValueThreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629)
- Config for default max gas fee (https://github.com/filecoin-project/lotus/pull/4652)
- bootstrap: don't return early when one drand resolution fails (https://github.com/filecoin-project/lotus/pull/4626)
- polish: add ClaimsChanged and DiffClaims method to power shim (https://github.com/filecoin-project/lotus/pull/4628)
- Simplify chain event Called API (https://github.com/filecoin-project/lotus/pull/4664)
- Cache deal states for most recent old/new tipset (https://github.com/filecoin-project/lotus/pull/4623)
- Add miner available balance and power info to state miner info (https://github.com/filecoin-project/lotus/pull/4618)
- Call GetHeaviestTipSet() only once when syncing (https://github.com/filecoin-project/lotus/pull/4696)
- modify runtime gasUsed printf (https://github.com/filecoin-project/lotus/pull/4704)
- Rename builtin actor generators (https://github.com/filecoin-project/lotus/pull/4697)
- Move gas multiplier as property of pricelist (https://github.com/filecoin-project/lotus/pull/4728)
- polish: add msig pendingtxn diffing and comp (https://github.com/filecoin-project/lotus/pull/4719)
- Optional chain Bitswap (https://github.com/filecoin-project/lotus/pull/4717)
- rewrite sync manager (https://github.com/filecoin-project/lotus/pull/4599)
- async connect to bootstrappers (https://github.com/filecoin-project/lotus/pull/4785)
- head change coalescer (https://github.com/filecoin-project/lotus/pull/4688)
- move to native badger blockstore; leverage zero-copy View() to deserialize in-place (https://github.com/filecoin-project/lotus/pull/4681)
- badger blockstore: minor improvements (https://github.com/filecoin-project/lotus/pull/4811)
- Do not fail wallet delete because of pre-existing trashed key (https://github.com/filecoin-project/lotus/pull/4589)
- Correctly delete the default wallet address (https://github.com/filecoin-project/lotus/pull/4705)
- predicates: Fast StateGetActor wrapper (https://github.com/filecoin-project/lotus/pull/4835)
#### Mining

- worker key should change when set sender found key not equal with the value on chain (https://github.com/filecoin-project/lotus/pull/4595)
- extern/sector-storage: fix GPU usage overwrite bug (https://github.com/filecoin-project/lotus/pull/4627)
- sectorstorage: Fix manager restart edge-case (https://github.com/filecoin-project/lotus/pull/4645)
- storagefsm: Fix GetTicket loop when the sector is already precommitted (https://github.com/filecoin-project/lotus/pull/4643)
- Debug flag to force running sealing scheduler (https://github.com/filecoin-project/lotus/pull/4662)
- Fix worker reenabling, handle multiple restarts in worker (https://github.com/filecoin-project/lotus/pull/4666)
- keep retrying the proof until we run out of sectors to skip (https://github.com/filecoin-project/lotus/pull/4633)
- worker: Commands to pause/resume task processing (https://github.com/filecoin-project/lotus/pull/4615)
- struct name incorrect (https://github.com/filecoin-project/lotus/pull/4699)
- optimize code replace strings with constants (https://github.com/filecoin-project/lotus/pull/4769)
- optimize pledge sector (https://github.com/filecoin-project/lotus/pull/4765)
- Track sealing processes across lotus-miner restarts (https://github.com/filecoin-project/lotus/pull/3618)
- Fix scheduler lockups after storage is freed (https://github.com/filecoin-project/lotus/pull/4778)
- storage: Track worker hostnames with work (https://github.com/filecoin-project/lotus/pull/4779)
- Expand sched-diag; Command to abort sealing calls (https://github.com/filecoin-project/lotus/pull/4804)
- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
- docsgen: Support miner/worker (https://github.com/filecoin-project/lotus/pull/4817)
- miner: Basic storage cleanup command (https://github.com/filecoin-project/lotus/pull/4834)
#### Markets and Data Transfer

- Flesh out data transfer features (https://github.com/filecoin-project/lotus/pull/4572)
- Fix memory leaks in data transfer (https://github.com/filecoin-project/lotus/pull/4619)
- Handle deal id changes in OnDealSectorCommitted (https://github.com/filecoin-project/lotus/pull/4730)
- Refactor FundManager (https://github.com/filecoin-project/lotus/pull/4736)
- refactor: integrate new FundManager (https://github.com/filecoin-project/lotus/pull/4787)
- Fix race in paych manager when req context is cancelled (https://github.com/filecoin-project/lotus/pull/4803)
- fix race in paych manager add funds (https://github.com/filecoin-project/lotus/pull/4597)
- Fix panic in FundManager (https://github.com/filecoin-project/lotus/pull/4808)
- Fix: dont crash on startup if funds migration fails (https://github.com/filecoin-project/lotus/pull/4827)
#### UX

- Make EarlyExpiration in sectors list less scary (https://github.com/filecoin-project/lotus/pull/4600)
- Add commands to change the worker key (https://github.com/filecoin-project/lotus/pull/4513)
- Expose ClientDealSize via CLI (https://github.com/filecoin-project/lotus/pull/4569)
- client deal: Cache CommD when creating multiple deals (https://github.com/filecoin-project/lotus/pull/4535)
- miner sectors list: flags for events/seal time (https://github.com/filecoin-project/lotus/pull/4649)
- make IPFS online mode configurable (https://github.com/filecoin-project/lotus/pull/4650)
- Add sync status to miner info command (https://github.com/filecoin-project/lotus/pull/4669)
- Add a StateDecodeParams method (https://github.com/filecoin-project/lotus/pull/4105)
- sched: Interactive RPC Shell (https://github.com/filecoin-project/lotus/pull/4692)
- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210)
- Update lotus-stats with a richer cli (https://github.com/filecoin-project/lotus/pull/4718)
- Use TSK passed to GasEstimateGasLimit (https://github.com/filecoin-project/lotus/pull/4739)
- match data type for reward state api (https://github.com/filecoin-project/lotus/pull/4745)
- Add `termination-estimate` to get an estimation for how much a termination penalty will be (https://github.com/filecoin-project/lotus/pull/4617)
- Restrict `ParseFIL` input length (https://github.com/filecoin-project/lotus/pull/4780)
- cmd sectors commitIDs len debug (https://github.com/filecoin-project/lotus/pull/4786)
- Add client deal-stats CLI (https://github.com/filecoin-project/lotus/pull/4788)
- Modify printf format (https://github.com/filecoin-project/lotus/pull/4795)
- Updated msig inspect (https://github.com/filecoin-project/lotus/pull/4533)
- Delete the duplicate output (https://github.com/filecoin-project/lotus/pull/4819)
- miner: Storage list sectors command (https://github.com/filecoin-project/lotus/pull/4831)
- drop a few logs down to debug (https://github.com/filecoin-project/lotus/pull/4832)
#### Testing and Tooling

- refactor: share code between CLI tests (https://github.com/filecoin-project/lotus/pull/4598)
- Fix flaky TestCLIDealFlow (https://github.com/filecoin-project/lotus/pull/4608)
- Fix flaky testMiningReal (https://github.com/filecoin-project/lotus/pull/4609)
- Add election run-dummy command (https://github.com/filecoin-project/lotus/pull/4498)
- Fix .gitmodules (https://github.com/filecoin-project/lotus/pull/4713)
- fix metrics wiring (https://github.com/filecoin-project/lotus/pull/4691)
- shed: Util for creating ID CIDs (https://github.com/filecoin-project/lotus/pull/4726)
- Run kumquat upgrade on devnets (https://github.com/filecoin-project/lotus/pull/4734)
- Make pond work again (https://github.com/filecoin-project/lotus/pull/4775)
- lotus-stats: fix influx flags (https://github.com/filecoin-project/lotus/pull/4810)
- 2k sync BootstrapPeerThreshold (https://github.com/filecoin-project/lotus/pull/4797)
- test for FundManager panic to ensure it is fixed (https://github.com/filecoin-project/lotus/pull/4825)
- Stop mining at the end of tests (https://github.com/filecoin-project/lotus/pull/4826)
- Make some logs quieter (https://github.com/filecoin-project/lotus/pull/4709)
#### Dependencies

- update filecoin-ffi in go mod (https://github.com/filecoin-project/lotus/pull/4584)
- Update FFI (https://github.com/filecoin-project/lotus/pull/4613)
- feat: integrate new optional blst backend and verification optimizations from proofs (https://github.com/filecoin-project/lotus/pull/4630)
- Use https for blst submodule (https://github.com/filecoin-project/lotus/pull/4710)
- Update go-bitfield (https://github.com/filecoin-project/lotus/pull/4756)
- Update Yamux (https://github.com/filecoin-project/lotus/pull/4758)
- Update to latest go-bitfield (https://github.com/filecoin-project/lotus/pull/4793)
- Update to latest go-address (https://github.com/filecoin-project/lotus/pull/4798)
- update libp2p for stream interface changes (https://github.com/filecoin-project/lotus/pull/4814)
# 1.1.2 / 2020-10-24

This is a patch release of Lotus that builds on the fixes involving worker keys that were introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated.
Makefile: 14 lines changed
@@ -5,10 +5,10 @@ all: build

 unexport GOFLAGS

-GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 14), 1)
-$(warning Your Golang version is go 1.$(GOVERSION))
-$(error Update Golang to version $(shell grep '^go' go.mod))
+GOVERSION:=$(shell go version | cut -d' ' -f 3 | awk -F. '{printf "%d%03d", $$2, $$3}')
+ifeq ($(shell expr $(GOVERSION) \< 15005), 1)
+$(warning Your Golang version is go 1.$(shell expr $(GOVERSION) / 1000).$(shell expr $(GOVERSION) % 1000))
+$(error Update Golang to version to at least 1.15.5)
 endif

 # git modules that need to be loaded
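The rewritten check encodes minor and patch as minor*1000 + patch, so 1.15.5 becomes 15005 and a single numeric comparison covers both components (the old check compared the minor version only). A standalone Go sketch of the same encoding, for illustration:

    package main

    import "fmt"

    // encode mirrors the Makefile's awk program: minor*1000 + patch.
    // go1.15.5 -> 15005, go1.14.2 -> 14002.
    func encode(minor, patch int) int { return minor*1000 + patch }

    func main() {
        required := encode(15, 5)    // the 15005 threshold above
        have := encode(14, 2)        // a toolchain that is too old
        fmt.Println(have < required) // true: the Makefile would error out
    }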
@@ -179,7 +179,7 @@ BINS+=lotus-bench

 lotus-stats:
 	rm -f lotus-stats
-	go build -o lotus-stats ./cmd/lotus-stats
+	go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats
 	go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build
 .PHONY: lotus-stats
 BINS+=lotus-stats
@@ -304,7 +304,9 @@ method-gen:
 gen: type-gen method-gen

 docsgen:
-	go run ./api/docgen > documentation/en/api-methods.md
+	go run ./api/docgen "api/api_full.go" "FullNode" > documentation/en/api-methods.md
+	go run ./api/docgen "api/api_storage.go" "StorageMiner" > documentation/en/api-methods-miner.md
+	go run ./api/docgen "api/api_worker.go" "WorkerAPI" > documentation/en/api-methods-worker.md

 print-%:
 	@echo $*=$($*)
@@ -514,8 +514,10 @@ type FullNode interface {
 	// along with the address removal.
 	MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)

-	MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
-	// MarketFreeBalance
+	// MarketReserveFunds reserves funds for a deal
+	MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error)
+	// MarketReleaseFunds releases funds reserved by MarketReserveFunds
+	MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error

 	// MethodGroup: Paych
 	// The Paych methods are for interacting with and managing payment channels
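The explicit reserve/release pair replaces MarketEnsureAvailable. A hedged usage sketch against the interface as declared above (the function and variable names here are illustrative, not from the repository):

    // reserveForDeal reserves market funds before publishing a deal and
    // releases the reservation afterwards. fn is a connected FullNode client.
    func reserveForDeal(ctx context.Context, fn api.FullNode, wallet, addr address.Address, amt types.BigInt) error {
        if _, err := fn.MarketReserveFunds(ctx, wallet, addr, amt); err != nil {
            return err
        }
        // ... publish the storage deal here ...
        return fn.MarketReleaseFunds(ctx, addr, amt)
    }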
@@ -789,8 +791,9 @@ type IpldObject struct {
 }

 type ActiveSync struct {
-	Base   *types.TipSet
-	Target *types.TipSet
+	WorkerID uint64
+	Base     *types.TipSet
+	Target   *types.TipSet

 	Stage  SyncStateStage
 	Height abi.ChainEpoch
@@ -71,6 +71,7 @@ type StorageMiner interface {

 	// SealingSchedDiag dumps internal sealing scheduler state
 	SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error)
+	SealingAbort(ctx context.Context, call storiface.CallID) error

 	stores.SectorIndex
@@ -241,7 +241,8 @@ type FullNodeStruct struct {
 	MsigSwapCancel   func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
 	MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"`

-	MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
+	MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
+	MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"`

 	PaychGet          func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
 	PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
@@ -310,19 +311,20 @@ type StorageMinerStruct struct {
 	WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"`
 	WorkerJobs  func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"`

-	ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"`
-	ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"`
-	ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"`
-	ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"`
-	ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"`
-	ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"`
-	ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+	ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnFetch func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`

 	SealingSchedDiag func(context.Context, bool) (interface{}, error) `perm:"admin"`
+	SealingAbort     func(ctx context.Context, call storiface.CallID) error `perm:"admin"`

 	StorageList  func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
 	StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"`
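Behind the `err string` to `err *storiface.CallError` change (PR 4890 in the changelog above) is a structured error the scheduler can classify instead of a bare string. A minimal sketch of the idea; the real definition lives in extern/sector-storage/storiface, and the field and constant names below are illustrative:

    // ErrorCode classifies a failed worker call so the miner can decide
    // whether the call is worth retrying.
    type ErrorCode int

    const (
        ErrUnknown ErrorCode = iota
        ErrTempUnknown
        ErrTempWorkerRestart
    )

    // CallError replaces the old plain-string error in the Return* callbacks.
    type CallError struct {
        Code    ErrorCode
        Message string
    }

    func (c *CallError) Error() string { return c.Message }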
@@ -371,17 +373,17 @@ type WorkerStruct struct {
 	Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
 	Info  func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`

-	AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
-	SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
-	SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
-	FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
-	UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
-	ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
-	Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+	AddPiece func(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit2 func(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
+	SealCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
+	SealCommit2 func(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
+	FinalizeSector func(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	ReleaseUnsealed func(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	MoveStorage func(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
+	UnsealPiece func(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
+	ReadPiece func(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
+	Fetch func(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`

 	Remove          func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
 	StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
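The worker methods now take storage.SectorRef instead of abi.SectorID, threading the registered seal-proof type along with the sector identity; that is what lets the SDR-upgrade test near the end of this diff seal sectors under two different proof types for the same miner. The shape, abridged from the specs-storage package (the ProofType field is per that package at the time of this commit; only the ID field is visible in this diff):

    // SectorRef pairs a sector's identity with the seal proof it uses,
    // so callers no longer derive the proof type out of band.
    type SectorRef struct {
        ID        abi.SectorID
        ProofType abi.RegisteredSealProof
    }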
@@ -1117,8 +1119,12 @@ func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Addr
 	return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
 }

-func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
-	return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt)
+func (c *FullNodeStruct) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
+	return c.Internal.MarketReserveFunds(ctx, wallet, addr, amt)
+}
+
+func (c *FullNodeStruct) MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error {
+	return c.Internal.MarketReleaseFunds(ctx, addr, amt)
 }

 func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
@@ -1265,47 +1271,47 @@ func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]st
 	return c.Internal.WorkerJobs(ctx)
 }

-func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
 	return c.Internal.ReturnAddPiece(ctx, callID, pi, err)
 }

-func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error {
 	return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err)
 }

-func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error {
 	return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err)
 }

-func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error {
 	return c.Internal.ReturnSealCommit1(ctx, callID, out, err)
 }

-func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error {
 	return c.Internal.ReturnSealCommit2(ctx, callID, proof, err)
 }

-func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnFinalizeSector(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnReleaseUnsealed(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnMoveStorage(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnUnsealPiece(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error {
 	return c.Internal.ReturnReadPiece(ctx, callID, ok, err)
 }

-func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnFetch(ctx, callID, err)
 }
@@ -1313,6 +1319,10 @@ func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context, doSched bool)
 	return c.Internal.SealingSchedDiag(ctx, doSched)
 }

+func (c *StorageMinerStruct) SealingAbort(ctx context.Context, call storiface.CallID) error {
+	return c.Internal.SealingAbort(ctx, call)
+}
+
 func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error {
 	return c.Internal.StorageAttach(ctx, si, st)
 }
@@ -1503,47 +1513,47 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
 	return w.Internal.Info(ctx)
 }

-func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
+func (w *WorkerStruct) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
 	return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
 }

-func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
 }

-func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit2(ctx, sector, pc1o)
 }

-func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
 }

-func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
 	return w.Internal.SealCommit2(ctx, sector, c1o)
 }

-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
 }

-func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
 	return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
 }

-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
+func (w *WorkerStruct) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
 	return w.Internal.MoveStorage(ctx, sector, types)
 }

-func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
+func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
 	return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c)
 }

-func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
+func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	return w.Internal.ReadPiece(ctx, sink, sector, offset, size)
 }

-func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+func (w *WorkerStruct) Fetch(ctx context.Context, id storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
 	return w.Internal.Fetch(ctx, id, fileType, ptype, am)
 }
@@ -6,12 +6,14 @@ import (
 	"go/ast"
 	"go/parser"
 	"go/token"
 	"os"
 	"reflect"
 	"sort"
 	"strings"
 	"time"
 	"unicode"

 	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-filestore"
 	metrics "github.com/libp2p/go-libp2p-core/metrics"
@@ -24,6 +26,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
 	datatransfer "github.com/filecoin-project/go-data-transfer"
 	filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-multistore"
@@ -36,6 +39,10 @@ import (
 	"github.com/filecoin-project/lotus/api/apistruct"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
@@ -83,7 +90,7 @@ func init() {
 	addExample(&pid)

 	addExample(bitfield.NewFromSet([]uint64{5}))
-	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
 	addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
 	addExample(abi.ChainEpoch(10101))
 	addExample(crypto.SigTypeBLS)
@@ -117,17 +124,17 @@ func init() {
 	addExample(network.ReachabilityPublic)
 	addExample(build.NewestNetworkVersion)
 	addExample(&types.ExecutionTrace{
-		Msg:    exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
-		MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
+		Msg:    exampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
+		MsgRct: exampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
 	})
 	addExample(map[string]types.Actor{
-		"t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
+		"t01236": exampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
 	})
 	addExample(map[string]api.MarketDeal{
-		"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
+		"t026363": exampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
 	})
 	addExample(map[string]api.MarketBalance{
-		"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
+		"t026363": exampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
 	})
 	addExample(map[string]*pubsub.TopicScoreSnapshot{
 		"/blocks": {
@@ -162,9 +169,81 @@ func init() {
 	// because reflect.TypeOf(maddr) returns the concrete type...
 	ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr

+	// miner specific
+	addExample(filestore2.Path(".lotusminer/fstmp123"))
+	si := multistore.StoreID(12)
+	addExample(&si)
+	addExample(retrievalmarket.DealID(5))
+	addExample(abi.ActorID(1000))
+	addExample(map[string][]api.SealedRef{
+		"98000": {
+			api.SealedRef{
+				SectorID: 100,
+				Offset:   10 << 20,
+				Size:     1 << 20,
+			},
+		},
+	})
+	addExample(api.SectorState(sealing.Proving))
+	addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
+	addExample(storiface.FTUnsealed)
+	addExample(storiface.PathSealing)
+	addExample(map[stores.ID][]stores.Decl{
+		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
+			{
+				SectorID:       abi.SectorID{Miner: 1000, Number: 100},
+				SectorFileType: storiface.FTSealed,
+			},
+		},
+	})
+	addExample(map[stores.ID]string{
+		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
+	})
+	addExample(map[uuid.UUID][]storiface.WorkerJob{
+		uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+			{
+				ID: storiface.CallID{
+					Sector: abi.SectorID{Miner: 1000, Number: 100},
+					ID:     uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"),
+				},
+				Sector:   abi.SectorID{Miner: 1000, Number: 100},
+				Task:     sealtasks.TTPreCommit2,
+				RunWait:  0,
+				Start:    time.Unix(1605172927, 0).UTC(),
+				Hostname: "host",
+			},
+		},
+	})
+	addExample(map[uuid.UUID]storiface.WorkerStats{
+		uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+			Info: storiface.WorkerInfo{
+				Hostname: "host",
+				Resources: storiface.WorkerResources{
+					MemPhysical: 256 << 30,
+					MemSwap:     120 << 30,
+					MemReserved: 2 << 30,
+					CPUs:        64,
+					GPUs:        []string{"aGPU 1337"},
+				},
+			},
+			Enabled:    true,
+			MemUsedMin: 0,
+			MemUsedMax: 0,
+			GpuUsed:    false,
+			CpuUse:     0,
+		},
+	})
+	addExample(storiface.ErrorCode(0))
+
+	// worker specific
+	addExample(storiface.AcquireMove)
+	addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded()))
+	addExample(map[sealtasks.TaskType]struct{}{
+		sealtasks.TTPreCommit2: {},
+	})
 }

-func exampleValue(t, parent reflect.Type) interface{} {
+func exampleValue(method string, t, parent reflect.Type) interface{} {
 	v, ok := ExampleValues[t]
 	if ok {
 		return v
@@ -173,25 +252,25 @@ func exampleValue(t, parent reflect.Type) interface{} {
 	switch t.Kind() {
 	case reflect.Slice:
 		out := reflect.New(t).Elem()
-		reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
+		reflect.Append(out, reflect.ValueOf(exampleValue(method, t.Elem(), t)))
 		return out.Interface()
 	case reflect.Chan:
-		return exampleValue(t.Elem(), nil)
+		return exampleValue(method, t.Elem(), nil)
 	case reflect.Struct:
-		es := exampleStruct(t, parent)
+		es := exampleStruct(method, t, parent)
 		v := reflect.ValueOf(es).Elem().Interface()
 		ExampleValues[t] = v
 		return v
 	case reflect.Array:
 		out := reflect.New(t).Elem()
 		for i := 0; i < t.Len(); i++ {
-			out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
+			out.Index(i).Set(reflect.ValueOf(exampleValue(method, t.Elem(), t)))
 		}
 		return out.Interface()

 	case reflect.Ptr:
 		if t.Elem().Kind() == reflect.Struct {
-			es := exampleStruct(t.Elem(), t)
+			es := exampleStruct(method, t.Elem(), t)
 			//ExampleValues[t] = es
 			return es
 		}
@@ -199,10 +278,10 @@ func exampleValue(t, parent reflect.Type) interface{} {
 		return struct{}{}
 	}

-	panic(fmt.Sprintf("No example value for type: %s", t))
+	panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method))
 }

-func exampleStruct(t, parent reflect.Type) interface{} {
+func exampleStruct(method string, t, parent reflect.Type) interface{} {
 	ns := reflect.New(t)
 	for i := 0; i < t.NumField(); i++ {
 		f := t.Field(i)
@@ -210,7 +289,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
 			continue
 		}
 		if strings.Title(f.Name) == f.Name {
-			ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
+			ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(method, f.Type, t)))
 		}
 	}
@@ -218,6 +297,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
 }

 type Visitor struct {
+	Root    string
 	Methods map[string]ast.Node
 }
@@ -227,7 +307,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
 		return v
 	}

-	if st.Name.Name != "FullNode" {
+	if st.Name.Name != v.Root {
 		return nil
 	}
@@ -243,7 +323,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {

 const noComment = "There are not yet any comments for this method."

-func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
+func parseApiASTInfo(apiFile, iface string) (map[string]string, map[string]string) { //nolint:golint
 	fset := token.NewFileSet()
 	pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
 	if err != nil {
@@ -252,11 +332,11 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint

 	ap := pkgs["api"]

-	f := ap.Files["api/api_full.go"]
+	f := ap.Files[apiFile]

 	cmap := ast.NewCommentMap(fset, f, f.Comments)

-	v := &Visitor{make(map[string]ast.Node)}
+	v := &Visitor{iface, make(map[string]ast.Node)}
 	ast.Walk(v, pkgs["api"])

 	groupDocs := make(map[string]string)
@@ -312,13 +392,30 @@ func methodGroupFromName(mn string) string {
 }

 func main() {
-	comments, groupComments := parseApiASTInfo()
+	comments, groupComments := parseApiASTInfo(os.Args[1], os.Args[2])

 	groups := make(map[string]*MethodGroup)

-	var api struct{ api.FullNode }
-	t := reflect.TypeOf(api)
+	var t reflect.Type
+	var permStruct, commonPermStruct reflect.Type
+
+	switch os.Args[2] {
+	case "FullNode":
+		t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
+		permStruct = reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
+		commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
+	case "StorageMiner":
+		t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
+		permStruct = reflect.TypeOf(apistruct.StorageMinerStruct{}.Internal)
+		commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
+	case "WorkerAPI":
+		t = reflect.TypeOf(new(struct{ api.WorkerAPI })).Elem()
+		permStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
+		commonPermStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
+	default:
+		panic("unknown type")
+	}

 	for i := 0; i < t.NumMethod(); i++ {
 		m := t.Method(i)
@@ -336,7 +433,7 @@ func main() {
 		ft := m.Func.Type()
 		for j := 2; j < ft.NumIn(); j++ {
 			inp := ft.In(j)
-			args = append(args, exampleValue(inp, nil))
+			args = append(args, exampleValue(m.Name, inp, nil))
 		}

 		v, err := json.MarshalIndent(args, "", "  ")
@@ -344,7 +441,7 @@ func main() {
 			panic(err)
 		}

-		outv := exampleValue(ft.Out(0), nil)
+		outv := exampleValue(m.Name, ft.Out(0), nil)

 		ov, err := json.MarshalIndent(outv, "", "  ")
 		if err != nil {
@@ -377,9 +474,6 @@ func main() {
 		}
 	}

-	permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
-	commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
-
 	for _, g := range groupslice {
 		g := g
 		fmt.Printf("## %s\n", g.GroupName)
@@ -37,7 +37,12 @@ func (bm *BlockMiner) MineBlocks() {
 	go func() {
 		defer close(bm.done)
 		for atomic.LoadInt64(&bm.mine) == 1 {
-			time.Sleep(bm.blocktime)
+			select {
+			case <-bm.ctx.Done():
+				return
+			case <-time.After(bm.blocktime):
+			}

 			nulls := atomic.SwapInt64(&bm.nulls, 0)
 			if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
 				InjectNulls: abi.ChainEpoch(nulls),
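The select above replaces an uninterruptible time.Sleep, so canceling the test context stops the mining loop immediately instead of after a full block time. The idiom in isolation, as a runnable Go sketch:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // sleepCtx waits for d, returning false early if ctx is canceled.
    func sleepCtx(ctx context.Context, d time.Duration) bool {
        select {
        case <-ctx.Done():
            return false
        case <-time.After(d):
            return true
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
            time.Sleep(10 * time.Millisecond)
            cancel() // simulate test teardown
        }()
        fmt.Println(sleepCtx(ctx, time.Hour)) // prints false promptly
    }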
@@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {

 func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
 	ctx := context.Background()
-	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -109,7 +109,7 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
 var OneFull = DefaultFullOpts(1)
 var TwoFull = DefaultFullOpts(2)

-var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
+var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	return FullNodeOpts{
 		Opts: func(nodes []TestNode) node.Option {
 			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
@@ -122,6 +122,25 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	}
 }

+var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
+	return FullNodeOpts{
+		Opts: func(nodes []TestNode) node.Option {
+			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
+				Network:   network.Version6,
+				Height:    1,
+				Migration: stmgr.UpgradeActorsV2,
+			}, {
+				Network:   network.Version7,
+				Height:    calico,
+				Migration: stmgr.UpgradeCalico,
+			}, {
+				Network:   network.Version8,
+				Height:    persian,
+			}})
+		},
+	}
+}
+
 var MineNext = miner.MineReq{
 	InjectNulls: 0,
 	Done:        func(bool, abi.ChainEpoch, error) {},
@@ -3,18 +3,22 @@ package test

 import (
 	"context"
 	"fmt"
 	"sort"
 	"strings"
 	"sync/atomic"
 	"testing"
 	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/network"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
)
|
||||
|
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]

addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}

if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)

pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

}}); err != nil {
t.Error(err)
}

// 3 sealing rounds: before, during, after.
if round >= 3 {
continue
}

head, err := client.ChainHead(ctx)
assert.NoError(t, err)

// rounds happen every 500 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}

ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}

}
}()

// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)

s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})

for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}

atomic.StoreInt64(&mine, 0)
<-done
}

func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -63,11 +151,13 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect

func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
for i := 0; i < n; i++ {
err := miner.PledgeSector(ctx)
require.NoError(t, err)
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
err := miner.PledgeSector(ctx)
require.NoError(t, err)
}

for {
@ -126,7 +216,7 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)

client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
@ -209,15 +299,17 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,

// Drop the partition
err = secs.ForEach(func(sid uint64) error {
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
},
}, true)
})
require.NoError(t, err)
}

var s abi.SectorID
var s storage.SectorRef

// Drop 1 sector from deadline 3 partition 0
{
@ -238,9 +330,11 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
require.NoError(t, err)
fmt.Println("the sectors", all)

s = abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sn),
s = storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sn),
},
}

err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
@ -7,7 +7,6 @@ import (
"os"

"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/chain/actors/policy"
)

@ -22,7 +21,9 @@ const UpgradeTapeHeight = -4
var UpgradeActorsV2Height = abi.ChainEpoch(10)
var UpgradeLiftoffHeight = abi.ChainEpoch(-5)

const UpgradeKumquatHeight = -6
const UpgradeKumquatHeight = 15
const UpgradeCalicoHeight = 20
const UpgradePersianHeight = 25

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@ -53,3 +54,5 @@ const SlashablePowerDelay = 20

// Epochs
const InteractivePoRepConfidence = 6

const BootstrapPeerThreshold = 1
@ -11,7 +11,6 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"

builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)

@ -39,12 +38,11 @@ const UpgradeLiftoffHeight = 148888

const UpgradeKumquatHeight = 170000

const UpgradeCalicoHeight = 265200
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)

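For reference, mainnet epochs are 30 seconds (BlockDelaySecs below is builtin2.EpochDurationSeconds), so builtin2.EpochsInHour works out to 3600/30 = 120 and the Persian height resolves to a fixed epoch. A quick sanity check, not part of the diff:

// UpgradePersianHeight = 265200 + 120*60 = 272400, i.e. 60 hours after Calico
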
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)

if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
@ -60,3 +58,5 @@ func init() {
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)

const PropagationDelaySecs = uint64(6)

const BootstrapPeerThreshold = 4
@ -19,3 +19,12 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
func SetAddressNetwork(n address.Network) {
address.CurrentNetwork = n
}

func MustParseAddress(addr string) address.Address {
ret, err := address.NewFromString(addr)
if err != nil {
panic(err)
}

return ret
}

@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
// Consensus / Network

const AllowableClockDriftSecs = uint64(1)
const NewestNetworkVersion = network.Version6
const NewestNetworkVersion = network.Version8
const ActorUpgradeNetworkVersion = network.Version4

// Epochs
@ -61,6 +61,9 @@ const TicketRandomnessLookback = abi.ChainEpoch(1)

const AddressMainnetEnvVar = "_mainnet_"

// the 'f' prefix doesn't matter
var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")

// /////
// Devnet settings

@ -88,13 +88,18 @@ var (
UpgradeActorsV2Height abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -7
UpgradePersianHeight abi.ChainEpoch = -8

DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}

NewestNetworkVersion = network.Version5
NewestNetworkVersion = network.Version8
ActorUpgradeNetworkVersion = network.Version4

Devnet = true
Devnet = true
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
)

const BootstrapPeerThreshold = 1
@ -29,7 +29,7 @@ func buildType() string {
}

// BuildVersion is the local build version, set by build system
const BuildVersion = "1.1.2"
const BuildVersion = "1.2.0"

func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
@ -83,9 +83,9 @@ func VersionForType(nodeType NodeType) (Version, error) {

// semver versions of the rpc api exposed
var (
FullAPIVersion = newVer(0, 17, 0)
MinerAPIVersion = newVer(0, 17, 0)
WorkerAPIVersion = newVer(0, 16, 0)
FullAPIVersion = newVer(1, 0, 0)
MinerAPIVersion = newVer(1, 0, 0)
WorkerAPIVersion = newVer(1, 0, 0)
)

//nolint:varcheck,deadcode

152
chain/actors/builtin/init/diff.go
Normal file
@ -0,0 +1,152 @@
package init

import (
"bytes"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
typegen "github.com/whyrusleeping/cbor-gen"

"github.com/filecoin-project/lotus/chain/actors/adt"
)

func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) {
prem, err := pre.addressMap()
if err != nil {
return nil, err
}

curm, err := cur.addressMap()
if err != nil {
return nil, err
}

preRoot, err := prem.Root()
if err != nil {
return nil, err
}

curRoot, err := curm.Root()
if err != nil {
return nil, err
}

results := new(AddressMapChanges)
// no change.
if curRoot.Equals(preRoot) {
return results, nil
}

err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur})
if err != nil {
return nil, err
}

return results, nil
}

type addressMapDiffer struct {
Results *AddressMapChanges
pre, after State
}

type AddressMapChanges struct {
Added []AddressPair
Modified []AddressChange
Removed []AddressPair
}

func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
return abi.AddrKey(addr), nil
}

func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Results.Added = append(i.Results.Added, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}

func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}

fromID := new(typegen.CborInt)
if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
return err
}
fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
if err != nil {
return err
}

toID := new(typegen.CborInt)
if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
return err
}
toIDAddr, err := address.NewIDAddress(uint64(*toID))
if err != nil {
return err
}

i.Results.Modified = append(i.Results.Modified, AddressChange{
From: AddressPair{
ID: fromIDAddr,
PK: pkAddr,
},
To: AddressPair{
ID: toIDAddr,
PK: pkAddr,
},
})
return nil
}

func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Results.Removed = append(i.Results.Removed, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}

type AddressChange struct {
From AddressPair
To AddressPair
}

type AddressPair struct {
ID address.Address
PK address.Address
}
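Since diff.go is a new file, a minimal usage sketch may help. It assumes the caller has already loaded the init actor's state at two tipsets as pre and cur (hypothetical names), e.g. via this package's state loaders:

changes, err := init_.DiffAddressMap(pre, cur)
if err != nil {
return err
}
for _, p := range changes.Added {
fmt.Printf("registered %s -> %s\n", p.PK, p.ID)
}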
@ -57,4 +57,6 @@ type State interface {

// Sets the network's name. This should only be used on upgrade/fork.
SetNetworkName(name string) error

addressMap() (adt.Map, error)
}

@ -79,3 +79,7 @@ func (s *state0) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}

func (s *state0) addressMap() (adt.Map, error) {
return adt0.AsMap(s.store, s.AddressMap)
}

@ -79,3 +79,7 @@ func (s *state2) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}

func (s *state2) addressMap() (adt.Map, error) {
return adt2.AsMap(s.store, s.AddressMap)
}

@ -81,6 +81,7 @@ type DealProposals interface {
type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
type WithdrawBalanceParams = market0.WithdrawBalanceParams

type ClientDealProposal = market0.ClientDealProposal

@ -4,6 +4,8 @@ import (
"golang.org/x/xerrors"

"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
)

func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
@ -26,3 +28,42 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))

return bitfield.MultiMerge(parts...)
}

// SealProofTypeFromSectorSize returns preferred seal proof type for creating
// new miner actors and new sectors
func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) {
switch {
case nv < network.Version7:
switch ssize {
case 2 << 10:
return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
case 8 << 20:
return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
case 512 << 20:
return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
case 32 << 30:
return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
case 64 << 30:
return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}
case nv >= network.Version7:
switch ssize {
case 2 << 10:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
case 8 << 20:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
case 512 << 20:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
case 32 << 30:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
case 64 << 30:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}
}

return 0, xerrors.Errorf("unsupported network version")
}

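A short sketch of the new version-aware helper in use; the api handle and surrounding error handling are hypothetical:

nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return err
}
spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(32<<30), nv)
// nv < network.Version7  -> abi.RegisteredSealProof_StackedDrg32GiBV1
// nv >= network.Version7 -> abi.RegisteredSealProof_StackedDrg32GiBV1_1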
134
chain/actors/builtin/multisig/diff.go
Normal file
@ -0,0 +1,134 @@
package multisig

import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
cbg "github.com/whyrusleeping/cbor-gen"

"github.com/filecoin-project/lotus/chain/actors/adt"
)

type PendingTransactionChanges struct {
Added []TransactionChange
Modified []TransactionModification
Removed []TransactionChange
}

type TransactionChange struct {
TxID int64
Tx Transaction
}

type TransactionModification struct {
TxID int64
From Transaction
To Transaction
}

func DiffPendingTransactions(pre, cur State) (*PendingTransactionChanges, error) {
results := new(PendingTransactionChanges)
if changed, err := pre.PendingTxnChanged(cur); err != nil {
return nil, err
} else if !changed { // if nothing has changed then return an empty result and bail.
return results, nil
}

pret, err := pre.transactions()
if err != nil {
return nil, err
}

curt, err := cur.transactions()
if err != nil {
return nil, err
}

if err := adt.DiffAdtMap(pret, curt, &transactionDiffer{results, pre, cur}); err != nil {
return nil, err
}
return results, nil
}

type transactionDiffer struct {
Results *PendingTransactionChanges
pre, after State
}

func (t *transactionDiffer) AsKey(key string) (abi.Keyer, error) {
txID, err := abi.ParseIntKey(key)
if err != nil {
return nil, err
}
return abi.IntKey(txID), nil
}

func (t *transactionDiffer) Add(key string, val *cbg.Deferred) error {
txID, err := abi.ParseIntKey(key)
if err != nil {
return err
}
tx, err := t.after.decodeTransaction(val)
if err != nil {
return err
}
t.Results.Added = append(t.Results.Added, TransactionChange{
TxID: txID,
Tx: tx,
})
return nil
}

func (t *transactionDiffer) Modify(key string, from, to *cbg.Deferred) error {
txID, err := abi.ParseIntKey(key)
if err != nil {
return err
}

txFrom, err := t.pre.decodeTransaction(from)
if err != nil {
return err
}

txTo, err := t.after.decodeTransaction(to)
if err != nil {
return err
}

if approvalsChanged(txFrom.Approved, txTo.Approved) {
t.Results.Modified = append(t.Results.Modified, TransactionModification{
TxID: txID,
From: txFrom,
To: txTo,
})
}

return nil
}

func approvalsChanged(from, to []address.Address) bool {
if len(from) != len(to) {
return true
}
for idx := range from {
if from[idx] != to[idx] {
return true
}
}
return false
}

func (t *transactionDiffer) Remove(key string, val *cbg.Deferred) error {
txID, err := abi.ParseIntKey(key)
if err != nil {
return err
}
tx, err := t.pre.decodeTransaction(val)
if err != nil {
return err
}
t.Results.Removed = append(t.Results.Removed, TransactionChange{
TxID: txID,
Tx: tx,
})
return nil
}
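As with the init actor diff above, a minimal calling sketch; pre and cur are multisig States loaded by the caller (hypothetical names):

changes, err := multisig.DiffPendingTransactions(pre, cur)
if err != nil {
return err
}
for _, m := range changes.Modified {
fmt.Printf("txn %d: approvals %d -> %d\n", m.TxID, len(m.From.Approved), len(m.To.Approved))
}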
@ -1,6 +1,7 @@
package multisig

import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
@ -47,6 +48,10 @@ type State interface {
Signers() ([]address.Address, error)

ForEachPendingTxn(func(id int64, txn Transaction) error) error
PendingTxnChanged(State) (bool, error)

transactions() (adt.Map, error)
decodeTransaction(val *cbg.Deferred) (Transaction, error)
}

type Transaction = msig0.Transaction

@ -1,17 +1,20 @@
package multisig

import (
"bytes"
"encoding/binary"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/chain/actors/adt"

msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
multisig0 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
)

var _ State = (*state0)(nil)
@ -68,3 +71,24 @@ func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
return cb(txid, (Transaction)(out))
})
}

func (s *state0) PendingTxnChanged(other State) (bool, error) {
other0, ok := other.(*state0)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.PendingTxns.Equals(other0.PendingTxns), nil
}

func (s *state0) transactions() (adt.Map, error) {
return adt0.AsMap(s.store, s.PendingTxns)
}

func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
var tx multisig0.Transaction
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Transaction{}, err
}
return tx, nil
}

@ -1,11 +1,13 @@
package multisig

import (
"bytes"
"encoding/binary"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/chain/actors/adt"
@ -68,3 +70,24 @@ func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
return cb(txid, (Transaction)(out))
})
}

func (s *state2) PendingTxnChanged(other State) (bool, error) {
other2, ok := other.(*state2)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.PendingTxns.Equals(other2.PendingTxns), nil
}

func (s *state2) transactions() (adt.Map, error) {
return adt2.AsMap(s.store, s.PendingTxns)
}

func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
var tx msig2.Transaction
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Transaction{}, err
}
return tx, nil
}

@ -28,7 +28,7 @@ type state2 struct {
store adt.Store
}

func (s *state2) ThisEpochReward() (abi.StoragePower, error) {
func (s *state2) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}

@ -55,11 +55,11 @@ func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}

func (s *state2) CumsumBaseline() (abi.StoragePower, error) {
func (s *state2) CumsumBaseline() (reward2.Spacetime, error) {
return s.State.CumsumBaseline, nil
}

func (s *state2) CumsumRealized() (abi.StoragePower, error) {
func (s *state2) CumsumRealized() (reward2.Spacetime, error) {
return s.State.CumsumRealized, nil
}

@ -3,6 +3,8 @@ package actors
import (
"bytes"

"github.com/filecoin-project/go-state-types/exitcode"

"github.com/filecoin-project/lotus/chain/actors/aerrors"
cbg "github.com/whyrusleeping/cbor-gen"
)
@ -11,7 +13,7 @@ func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
buf := new(bytes.Buffer)
if err := i.MarshalCBOR(buf); err != nil {
// TODO: shouldn't this be a fatal error?
return nil, aerrors.Absorb(err, 1, "failed to encode parameter")
return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
}
return buf.Bytes(), nil
}

@ -26,22 +26,29 @@ const (
// SetSupportedProofTypes sets supported proof types, across all actor versions.
// This should only be used for testing.
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types))
for _, t := range types {
newTypes[t] = struct{}{}
}
// Set for all miner versions.
miner0.SupportedProofTypes = newTypes
miner2.SupportedProofTypes = newTypes
miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))

AddSupportedProofTypes(types...)
}

// AddSupportedProofTypes adds supported proof types, across all actor versions.
// This should only be used for testing.
func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
for _, t := range types {
if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
panic("must specify v1 proof types only")
}
// Set for all miner versions.
miner0.SupportedProofTypes[t] = struct{}{}
miner2.SupportedProofTypes[t] = struct{}{}
miner2.PreCommitSealProofTypesV0[t] = struct{}{}

miner2.PreCommitSealProofTypesV7[t] = struct{}{}
miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}

miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
}
}

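A sketch of how a test build might drive these helpers, mirroring the calls visible elsewhere in this diff (e.g. the mainnet init()); note the panic above means only v1 proof types may be passed, and the v1_1 equivalents are registered automatically for the post-Calico pre-commit sets:

policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
// later, additionally allow 8MiB sectors:
policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1)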
@ -133,9 +140,9 @@ func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
}

func GetDefaultSectorSize() abi.SectorSize {
// supported proof types are the same across versions.
szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes))
for spt := range miner2.SupportedProofTypes {
// supported sector sizes are the same across versions.
szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8))
for spt := range miner2.PreCommitSealProofTypesV8 {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
@ -44,7 +44,7 @@ func TestSupportedProofTypes(t *testing.T) {

// Tests assumptions about policies being the same between actor versions.
func TestAssumptions(t *testing.T) {
require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes)
require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0)
require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension)
require.Equal(t, miner0.ChainFinality, miner2.ChainFinality)
@ -57,10 +57,10 @@ func TestAssumptions(t *testing.T) {
}

func TestPartitionSizes(t *testing.T) {
for p := range abi.PoStSealProofTypes {
sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
for _, p := range abi.SealProofInfos {
sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
require.NoError(t, err)
sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p)
sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
if err != nil {
// new proof type.
continue
@ -18,7 +18,7 @@ func VersionForNetwork(version network.Version) Version {
switch version {
case network.Version0, network.Version1, network.Version2, network.Version3:
return Version0
case network.Version4, network.Version5, network.Version6:
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8:
return Version2
default:
panic(fmt.Sprintf("unsupported network version %d", version))
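In other words, the Calico and Persian upgrades stay on the v2 actors; a quick check from within the package:

v := VersionForNetwork(network.Version8) // Version2, same as Version4 through Version7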
34
chain/events/state/fastapi.go
Normal file
@ -0,0 +1,34 @@
package state

import (
"context"

"github.com/filecoin-project/go-address"

"github.com/filecoin-project/lotus/chain/types"
)

type FastChainApiAPI interface {
ChainAPI

ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
}

type fastAPI struct {
FastChainApiAPI
}

func WrapFastAPI(api FastChainApiAPI) ChainAPI {
return &fastAPI{
api,
}
}

func (a *fastAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
ts, err := a.FastChainApiAPI.ChainGetTipSet(ctx, tsk)
if err != nil {
return nil, err
}

return a.FastChainApiAPI.StateGetActor(ctx, actor, ts.Parents())
}
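The wrapper resolves the tipset key once and then queries the actor at the parent key, presumably so the state manager can serve the already-computed parent state rather than recomputing the tipset's own state. A minimal usage sketch, where fullAPI is a hypothetical implementation satisfying FastChainApiAPI:

capi := state.WrapFastAPI(fullAPI)
act, err := capi.StateGetActor(ctx, addr, head.Key())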
@ -1,7 +1,6 @@
package state

import (
"bytes"
"context"

"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -10,7 +9,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cbor "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"

"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
@ -419,179 +417,17 @@ type AddressPair struct {
PK address.Address
}

type InitActorAddressChanges struct {
Added []AddressPair
Modified []AddressChange
Removed []AddressPair
}

type AddressChange struct {
From AddressPair
To AddressPair
}

type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error)

func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
return abi.AddrKey(addr), nil
}

func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Added = append(i.Added, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}

func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}

fromID := new(typegen.CborInt)
if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
return err
}
fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
if err != nil {
return err
}

toID := new(typegen.CborInt)
if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
return err
}
toIDAddr, err := address.NewIDAddress(uint64(*toID))
if err != nil {
return err
}

i.Modified = append(i.Modified, AddressChange{
From: AddressPair{
ID: fromIDAddr,
PK: pkAddr,
},
To: AddressPair{
ID: toIDAddr,
PK: pkAddr,
},
})
return nil
}

func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Removed = append(i.Removed, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}

func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc {
return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) {
addressChanges := &InitActorAddressChanges{
Added: []AddressPair{},
Modified: []AddressChange{},
Removed: []AddressPair{},
}

err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error {
oldIdAddress, err := address.NewIDAddress(uint64(oldId))
if err != nil {
return err
}

newIdAddress, found, err := newState.ResolveAddress(oldAddress)
if err != nil {
return err
}

if !found {
addressChanges.Removed = append(addressChanges.Removed, AddressPair{
ID: oldIdAddress,
PK: oldAddress,
})
}

if oldIdAddress != newIdAddress {
addressChanges.Modified = append(addressChanges.Modified, AddressChange{
From: AddressPair{
ID: oldIdAddress,
PK: oldAddress,
},
To: AddressPair{
ID: newIdAddress,
PK: oldAddress,
},
})
}

return nil
})

addressChanges, err := init_.DiffAddressMap(oldState, newState)
if err != nil {
return false, nil, err
}

err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error {
newIdAddress, err := address.NewIDAddress(uint64(newId))
if err != nil {
return err
}

_, found, err := newState.ResolveAddress(newAddress)
if err != nil {
return err
}

if !found {
addressChanges.Added = append(addressChanges.Added, AddressPair{
ID: newIdAddress,
PK: newAddress,
})
}

return nil
})

if err != nil {
return false, nil, err
}

if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 {
if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 {
return false, nil, nil
}

return true, addressChanges, nil
}
}

@ -7,7 +7,6 @@ import (
"math/rand"
"time"

"github.com/libp2p/go-libp2p-core/helpers"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
@ -412,11 +411,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque
return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
}

defer func() {
// Note: this will become just stream.Close once we've completed the go-libp2p migration to
// go-libp2p-core 0.7.0
go helpers.FullClose(stream) //nolint:errcheck
}()
defer stream.Close() //nolint:errcheck

// Write request.
_ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline))

@ -15,7 +15,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"

"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/helpers"
inet "github.com/libp2p/go-libp2p-core/network"
)

@ -40,16 +39,14 @@ func (s *server) HandleStream(stream inet.Stream) {
ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream")
defer span.End()

// Note: this will become just stream.Close once we've completed the go-libp2p migration to
// go-libp2p-core 0.7.0
defer helpers.FullClose(stream) //nolint:errcheck
defer stream.Close() //nolint:errcheck

var req Request
if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil {
log.Warnf("failed to read block sync request: %s", err)
return
}
log.Infow("block sync request",
log.Debugw("block sync request",
"start", req.Head, "len", req.Length)

resp, err := s.processRequest(ctx, &req)
@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"sync/atomic"
"time"
@ -138,12 +139,20 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("failed to get metadata datastore: %w", err)
}

bds, err := lr.Datastore("/chain")
bs, err := lr.Blockstore(repo.BlockstoreChain)
if err != nil {
return nil, xerrors.Errorf("failed to get blocks datastore: %w", err)
return nil, err
}

bs := mybs{blockstore.NewBlockstore(bds)}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()

bs = mybs{bs}

ks, err := lr.KeyStore()
if err != nil {
@ -236,7 +245,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("make genesis block failed: %w", err)
}

cs := store.NewChainStore(bs, ds, sys, j)
cs := store.NewChainStore(bs, bs, ds, sys, j)

genfb := &types.FullBlock{Header: genb.Genesis}
gents := store.NewFullTipSet([]*types.FullBlock{genfb})

@ -482,7 +482,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
}

// temp chainstore
cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys, j)
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), sys, j)

// Verify PreSealed Data
stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs)
@ -23,8 +23,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"

builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
@ -101,7 +99,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
i := i
m := m

spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, GenesisNetworkVersion)
if err != nil {
return cid.Undef, err
}

112
chain/market/cbor_gen.go
Normal file
@ -0,0 +1,112 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.

package market

import (
"fmt"
"io"

cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)

var _ = xerrors.Errorf

var lengthBufFundedAddressState = []byte{131}

func (t *FundedAddressState) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write(lengthBufFundedAddressState); err != nil {
return err
}

scratch := make([]byte, 9)

// t.Addr (address.Address) (struct)
if err := t.Addr.MarshalCBOR(w); err != nil {
return err
}

// t.AmtReserved (big.Int) (struct)
if err := t.AmtReserved.MarshalCBOR(w); err != nil {
return err
}

// t.MsgCid (cid.Cid) (struct)

if t.MsgCid == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.MsgCid); err != nil {
return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err)
}
}

return nil
}

func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {
*t = FundedAddressState{}

br := cbg.GetPeeker(r)
scratch := make([]byte, 8)

maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}

if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}

// t.Addr (address.Address) (struct)

{

if err := t.Addr.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Addr: %w", err)
}

}
// t.AmtReserved (big.Int) (struct)

{

if err := t.AmtReserved.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err)
}

}
// t.MsgCid (cid.Cid) (struct)

{

b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}

c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err)
}

t.MsgCid = &c
}

}
return nil
}
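The generated codec writes FundedAddressState as a three-element CBOR array (the 131 length prefix is 0x83), with a nil MsgCid encoded as CBOR null. A hypothetical round-trip through the codecs (addr is assumed to exist):

var buf bytes.Buffer
in := &FundedAddressState{Addr: addr, AmtReserved: abi.NewTokenAmount(0)}
if err := in.MarshalCBOR(&buf); err != nil {
return err
}
var out FundedAddressState
if err := out.UnmarshalCBOR(&buf); err != nil {
return err
}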
707
chain/market/fundmanager.go
Normal file
@ -0,0 +1,707 @@
package market

import (
"context"
"sync"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log"
"go.uber.org/fx"
"golang.org/x/xerrors"
)

var log = logging.Logger("market_adapter")

// FundManagerAPI is the set of fx dependencies needed to run a fund manager
type FundManagerAPI struct {
fx.In

full.StateAPI
full.MpoolAPI
}

// fundManagerAPI is the specific methods called by the FundManager
// (used by the tests)
type fundManagerAPI interface {
MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error)
}

// FundManager keeps track of funds in a set of addresses
type FundManager struct {
ctx context.Context
shutdown context.CancelFunc
api fundManagerAPI
str *Store

lk sync.Mutex
fundedAddrs map[address.Address]*fundedAddress
}

func NewFundManager(lc fx.Lifecycle, api FundManagerAPI, ds dtypes.MetadataDS) *FundManager {
fm := newFundManager(&api, ds)
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
return fm.Start()
},
OnStop: func(ctx context.Context) error {
fm.Stop()
return nil
},
})
return fm
}

// newFundManager is used by the tests
func newFundManager(api fundManagerAPI, ds datastore.Batching) *FundManager {
ctx, cancel := context.WithCancel(context.Background())
return &FundManager{
ctx: ctx,
shutdown: cancel,
api: api,
str: newStore(ds),
fundedAddrs: make(map[address.Address]*fundedAddress),
}
}

func (fm *FundManager) Stop() {
fm.shutdown()
}

func (fm *FundManager) Start() error {
fm.lk.Lock()
defer fm.lk.Unlock()

// TODO:
// To save memory:
// - in State() only load addresses with in-progress messages
// - load the others just-in-time from getFundedAddress
// - delete(fm.fundedAddrs, addr) when the queue has been processed
return fm.str.forEach(func(state *FundedAddressState) {
fa := newFundedAddress(fm, state.Addr)
fa.state = state
fm.fundedAddrs[fa.state.Addr] = fa
fa.start()
})
}

// Creates a fundedAddress if it doesn't already exist, and returns it
func (fm *FundManager) getFundedAddress(addr address.Address) *fundedAddress {
fm.lk.Lock()
defer fm.lk.Unlock()

fa, ok := fm.fundedAddrs[addr]
if !ok {
fa = newFundedAddress(fm, addr)
fm.fundedAddrs[addr] = fa
}
return fa
}

// Reserve adds amt to `reserved`. If there are not enough available funds for
// the address, submits a message on chain to top up available funds.
// Returns the cid of the message that was submitted on chain, or cid.Undef if
// the required funds were already available.
func (fm *FundManager) Reserve(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
return fm.getFundedAddress(addr).reserve(ctx, wallet, amt)
}

// Subtract from `reserved`.
func (fm *FundManager) Release(addr address.Address, amt abi.TokenAmount) error {
return fm.getFundedAddress(addr).release(amt)
}

// Withdraw unreserved funds. Only succeeds if there are enough unreserved
// funds for the address.
// Returns the cid of the message that was submitted on chain.
func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt)
}

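A sketch of the intended call pattern for the new FundManager; the wallet and market-actor addresses are hypothetical:

// reserve funds before a deal; may push an AddBalance message on chain
msgCid, err := fm.Reserve(ctx, wallet, addr, abi.NewTokenAmount(1000))
if err != nil {
return err
}
// ... once the deal completes or fails ...
if err := fm.Release(addr, abi.NewTokenAmount(1000)); err != nil {
return err
}
// withdraw whatever is no longer reserved
_, err = fm.Withdraw(ctx, wallet, addr, abi.NewTokenAmount(500))
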
||||
// FundedAddressState keeps track of the state of an address with funds in the
|
||||
// datastore
|
||||
type FundedAddressState struct {
|
||||
Addr address.Address
|
||||
// AmtReserved is the amount that must be kept in the address (cannot be
|
||||
// withdrawn)
|
||||
AmtReserved abi.TokenAmount
|
||||
// MsgCid is the cid of an in-progress on-chain message
|
||||
MsgCid *cid.Cid
|
||||
}
|
||||
|
||||
// fundedAddress keeps track of the state and request queues for a
|
||||
// particular address
|
||||
type fundedAddress struct {
|
||||
ctx context.Context
|
||||
env *fundManagerEnvironment
|
||||
str *Store
|
||||
|
||||
lk sync.Mutex
|
||||
state *FundedAddressState
|
||||
|
||||
// Note: These request queues are ephemeral, they are not saved to store
|
||||
reservations []*fundRequest
|
||||
releases []*fundRequest
|
||||
withdrawals []*fundRequest
|
||||
|
||||
// Used by the tests
|
||||
onProcessStartListener func() bool
|
||||
}
|
||||
|
||||
func newFundedAddress(fm *FundManager, addr address.Address) *fundedAddress {
|
||||
return &fundedAddress{
|
||||
ctx: fm.ctx,
|
||||
env: &fundManagerEnvironment{api: fm.api},
|
||||
str: fm.str,
|
||||
state: &FundedAddressState{
|
||||
Addr: addr,
|
||||
AmtReserved: abi.NewTokenAmount(0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// If there is an in-progress on-chain message, don't submit any more messages
|
||||
// on chain until it completes
|
||||
func (a *fundedAddress) start() {
|
||||
a.lk.Lock()
|
||||
defer a.lk.Unlock()
|
||||
|
||||
if a.state.MsgCid != nil {
|
||||
a.debugf("restart: wait for %s", a.state.MsgCid)
|
||||
a.startWaitForResults(*a.state.MsgCid)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
|
||||
return a.requestAndWait(ctx, wallet, amt, &a.reservations)
|
||||
}
|
||||
|
||||
func (a *fundedAddress) release(amt abi.TokenAmount) error {
|
||||
_, err := a.requestAndWait(context.Background(), address.Undef, amt, &a.releases)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *fundedAddress) withdraw(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
|
||||
return a.requestAndWait(ctx, wallet, amt, &a.withdrawals)
|
||||
}
|
||||
|
||||
func (a *fundedAddress) requestAndWait(ctx context.Context, wallet address.Address, amt abi.TokenAmount, reqs *[]*fundRequest) (cid.Cid, error) {
|
||||
// Create a request and add it to the request queue
|
||||
req := newFundRequest(ctx, wallet, amt)
|
||||
|
||||
a.lk.Lock()
|
||||
*reqs = append(*reqs, req)
|
||||
a.lk.Unlock()
|
||||
|
||||
// Process the queue
|
||||
go a.process()
|
||||
|
||||
// Wait for the results
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return cid.Undef, ctx.Err()
|
||||
case r := <-req.Result:
|
||||
return r.msgCid, r.err
|
||||
}
|
||||
}
|
||||
|
||||
// Used by the tests
|
||||
func (a *fundedAddress) onProcessStart(fn func() bool) {
|
||||
a.lk.Lock()
|
||||
defer a.lk.Unlock()
|
||||
|
||||
a.onProcessStartListener = fn
|
||||
}
|
||||
|
||||
// Process queued requests
|
||||
func (a *fundedAddress) process() {
|
||||
a.lk.Lock()
|
||||
defer a.lk.Unlock()
|
||||
|
||||
// Used by the tests
|
||||
if a.onProcessStartListener != nil {
|
||||
done := a.onProcessStartListener()
|
||||
if !done {
|
||||
return
|
||||
}
|
||||
a.onProcessStartListener = nil
|
||||
}
|
||||
|
||||
// Check if we're still waiting for the response to a message
|
||||
if a.state.MsgCid != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if there's anything to do
|
||||
haveReservations := len(a.reservations) > 0 || len(a.releases) > 0
|
||||
haveWithdrawals := len(a.withdrawals) > 0
|
||||
if !haveReservations && !haveWithdrawals {
|
||||
return
|
||||
}
|
||||
|
||||
// Process reservations / releases
|
||||
if haveReservations {
|
||||
res, err := a.processReservations(a.reservations, a.releases)
|
||||
if err == nil {
|
||||
a.applyStateChange(res.msgCid, res.amtReserved)
|
||||
}
|
||||
a.reservations = filterOutProcessedReqs(a.reservations)
|
||||
a.releases = filterOutProcessedReqs(a.releases)
|
||||
}
|
||||
|
||||
// If there was no message sent on chain by adding reservations, and all
|
||||
// reservations have completed processing, process withdrawals
|
||||
if haveWithdrawals && a.state.MsgCid == nil && len(a.reservations) == 0 {
|
||||
withdrawalCid, err := a.processWithdrawals(a.withdrawals)
|
||||
if err == nil && withdrawalCid != cid.Undef {
|
||||
a.applyStateChange(&withdrawalCid, types.EmptyInt)
|
||||
}
|
||||
a.withdrawals = filterOutProcessedReqs(a.withdrawals)
|
||||
}
|
||||
|
||||
// If a message was sent on-chain
|
||||
if a.state.MsgCid != nil {
|
||||
// Start waiting for results of message (async)
|
||||
a.startWaitForResults(*a.state.MsgCid)
|
||||
}
|
||||
|
||||
// Process any remaining queued requests
|
||||
go a.process()
|
||||
}
|
||||
|
||||
// Filter out completed requests
|
||||
func filterOutProcessedReqs(reqs []*fundRequest) []*fundRequest {
|
||||
filtered := make([]*fundRequest, 0, len(reqs))
|
||||
for _, req := range reqs {
|
||||
if !req.Completed() {
|
||||
filtered = append(filtered, req)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// Apply the results of processing queues and save to the datastore
|
||||
func (a *fundedAddress) applyStateChange(msgCid *cid.Cid, amtReserved abi.TokenAmount) {
|
||||
a.state.MsgCid = msgCid
|
||||
if !amtReserved.Nil() {
|
||||
a.state.AmtReserved = amtReserved
|
||||
}
|
||||
a.saveState()
|
||||
}
|
||||
|
||||
// Clear the pending message cid so that a new message can be sent
|
||||
func (a *fundedAddress) clearWaitState() {
|
||||
a.state.MsgCid = nil
|
||||
a.saveState()
|
||||
}
|
||||
|
||||
// Save state to datastore
|
||||
func (a *fundedAddress) saveState() {
|
||||
// Not much we can do if saving to the datastore fails, just log
|
||||
err := a.str.save(a.state)
|
||||
if err != nil {
|
||||
log.Errorf("saving state to store for addr %s: %w", a.state.Addr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// The result of processing the reservation / release queues
|
||||
type processResult struct {
|
||||
// Requests that completed without adding funds
|
||||
covered []*fundRequest
|
||||
// Requests that added funds
|
||||
added []*fundRequest
|
||||
|
||||
// The new reserved amount
|
||||
amtReserved abi.TokenAmount
|
||||
// The message cid, if a message was submitted on-chain
|
||||
msgCid *cid.Cid
|
||||
}
|
||||
|
||||
// process reservations and releases, and return the resulting changes to state
|
||||
func (a *fundedAddress) processReservations(reservations []*fundRequest, releases []*fundRequest) (pr *processResult, prerr error) {
|
||||
// When the function returns
|
||||
defer func() {
|
||||
// If there's an error, mark all requests as errored
|
||||
if prerr != nil {
|
||||
for _, req := range append(reservations, releases...) {
|
||||
req.Complete(cid.Undef, prerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Complete all release requests
|
||||
for _, req := range releases {
|
||||
req.Complete(cid.Undef, nil)
|
||||
}
|
||||
|
||||
// Complete all requests that were covered by released amounts
|
||||
for _, req := range pr.covered {
|
||||
req.Complete(cid.Undef, nil)
|
||||
}
|
||||
|
||||
// If a message was sent
|
||||
if pr.msgCid != nil {
|
||||
// Complete all add funds requests
|
||||
for _, req := range pr.added {
|
||||
req.Complete(*pr.msgCid, nil)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Split reservations into those that are covered by released amounts,
|
||||
// and those to add to the reserved amount.
|
||||
// Note that we process requests from the same wallet in batches. So some
|
||||
// requests may not be included in covered if they don't match the first
|
||||
// covered request's wallet. These will be processed on a subsequent
|
||||
// invocation of processReservations.
|
||||
toCancel, toAdd, reservedDelta := splitReservations(reservations, releases)
|
||||
|
||||
// Apply the reserved delta to the reserved amount
|
||||
reserved := types.BigAdd(a.state.AmtReserved, reservedDelta)
|
||||
if reserved.LessThan(abi.NewTokenAmount(0)) {
|
||||
reserved = abi.NewTokenAmount(0)
|
||||
}
|
||||
res := &processResult{
|
||||
amtReserved: reserved,
|
||||
covered: toCancel,
|
||||
}
|
||||
|
||||
// Work out the amount to add to the balance
|
||||
amtToAdd := abi.NewTokenAmount(0)
|
||||
if len(toAdd) > 0 && reserved.GreaterThan(abi.NewTokenAmount(0)) {
|
||||
// Get available funds for address
|
||||
avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// amount to add = new reserved amount - available
|
||||
amtToAdd = types.BigSub(reserved, avail)
|
||||
a.debugf("reserved %d - avail %d = to add %d", reserved, avail, amtToAdd)
|
||||
}
|
||||
|
||||
// If there's nothing to add to the balance, bail out
|
||||
if amtToAdd.LessThanEqual(abi.NewTokenAmount(0)) {
|
||||
res.covered = append(res.covered, toAdd...)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Add funds to address
|
||||
a.debugf("add funds %d", amtToAdd)
|
||||
addFundsCid, err := a.env.AddFunds(a.ctx, toAdd[0].Wallet, a.state.Addr, amtToAdd)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Mark reservation requests as complete
|
||||
res.added = toAdd
|
||||
|
||||
// Save the message CID to state
|
||||
res.msgCid = &addFundsCid
|
||||
return res, nil
|
||||
}

// Split reservations into those that are under the total release amount
// (covered) and those that exceed it (to add).
// Note that we process requests from the same wallet in batches. So some
// requests may not be included in covered if they don't match the first
// covered request's wallet.
func splitReservations(reservations []*fundRequest, releases []*fundRequest) ([]*fundRequest, []*fundRequest, abi.TokenAmount) {
	toCancel := make([]*fundRequest, 0, len(reservations))
	toAdd := make([]*fundRequest, 0, len(reservations))
	toAddAmt := abi.NewTokenAmount(0)

	// Sum release amounts
	releaseAmt := abi.NewTokenAmount(0)
	for _, req := range releases {
		releaseAmt = types.BigAdd(releaseAmt, req.Amount())
	}

	// We only want to combine requests that come from the same wallet
	batchWallet := address.Undef
	for _, req := range reservations {
		amt := req.Amount()

		// If the amount to add to the reserve is cancelled out by a release
		if amt.LessThanEqual(releaseAmt) {
			// Cancel the request and update the release total
			releaseAmt = types.BigSub(releaseAmt, amt)
			toCancel = append(toCancel, req)
			continue
		}

		// The amount to add is greater than the release total, so we want
		// to send an add funds request

		// The first time through, the wallet will be undefined
		if batchWallet == address.Undef {
			batchWallet = req.Wallet
		}
		// If this request's wallet is the same as the batch wallet,
		// the requests will be combined
		if batchWallet == req.Wallet {
			delta := types.BigSub(amt, releaseAmt)
			toAddAmt = types.BigAdd(toAddAmt, delta)
			releaseAmt = abi.NewTokenAmount(0)
			toAdd = append(toAdd, req)
		}
	}

	// The change in the reserved amount is "amount to add" - "amount to release"
	reservedDelta := types.BigSub(toAddAmt, releaseAmt)

	return toCancel, toAdd, reservedDelta
}
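
To make the wallet-batching rule concrete, here is a hypothetical trace of splitReservations; the amounts and wallets are invented for illustration and do not appear in the diff:

// Input: reservations of 3 (wallet A), 5 (wallet A) and 4 (wallet B);
// queued releases summing to 6.
//
//   3 (A): 3 <= 6, covered by releases  -> toCancel, releaseAmt = 6 - 3 = 3
//   5 (A): 5 > 3, batch wallet set to A -> toAdd, toAddAmt = 5 - 3 = 2, releaseAmt = 0
//   4 (B): wallet B != batch wallet A   -> left queued for the next invocation
//
// Result: toCancel = [3], toAdd = [5], reservedDelta = toAddAmt - releaseAmt = 2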

// process withdrawal queue
func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid cid.Cid, prerr error) {
	// If there's an error, mark all withdrawal requests as errored
	defer func() {
		if prerr != nil {
			for _, req := range withdrawals {
				req.Complete(cid.Undef, prerr)
			}
		}
	}()

	// Get the net available balance
	avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr)
	if err != nil {
		return cid.Undef, err
	}

	netAvail := types.BigSub(avail, a.state.AmtReserved)

	// Fit as many withdrawals as possible into the available balance, and fail
	// the rest
	withdrawalAmt := abi.NewTokenAmount(0)
	allowedAmt := abi.NewTokenAmount(0)
	allowed := make([]*fundRequest, 0, len(withdrawals))
	var batchWallet address.Address
	for _, req := range withdrawals {
		amt := req.Amount()
		if amt.IsZero() {
			// If the context for the request was cancelled, bail out
			req.Complete(cid.Undef, err)
			continue
		}

		// If the amount would exceed the available amount, complete the
		// request with an error
		newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt)
		if newWithdrawalAmt.GreaterThan(netAvail) {
			err := xerrors.Errorf("insufficient funds for withdrawal of %d", amt)
			a.debugf("%s", err)
			req.Complete(cid.Undef, err)
			continue
		}

		// If this is the first allowed withdrawal request in this batch, save
		// its wallet address
		if batchWallet == address.Undef {
			batchWallet = req.Wallet
		}
		// If the request wallet doesn't match the batch wallet, bail out
		// (the withdrawal will be processed after the current batch has
		// completed)
		if req.Wallet != batchWallet {
			continue
		}

		// Include this withdrawal request in the batch
		withdrawalAmt = newWithdrawalAmt
		a.debugf("withdraw %d", amt)
		allowed = append(allowed, req)
		allowedAmt = types.BigAdd(allowedAmt, amt)
	}

	// Check if there is anything to withdraw.
	// Note that if the context for a request is cancelled,
	// req.Amount() returns zero
	if allowedAmt.Equals(abi.NewTokenAmount(0)) {
		// Mark allowed requests as complete
		for _, req := range allowed {
			req.Complete(cid.Undef, nil)
		}
		return cid.Undef, nil
	}

	// Withdraw funds
	a.debugf("withdraw funds %d", allowedAmt)
	withdrawFundsCid, err := a.env.WithdrawFunds(a.ctx, allowed[0].Wallet, a.state.Addr, allowedAmt)
	if err != nil {
		return cid.Undef, err
	}

	// Mark allowed requests as complete
	for _, req := range allowed {
		req.Complete(withdrawFundsCid, nil)
	}

	// Save the message CID to state
	return withdrawFundsCid, nil
}
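
The same batching applies to withdrawals, with the extra constraint that the running total must fit in the net available balance. A hypothetical trace (numbers invented, not from the diff): suppose avail = 20 and AmtReserved = 8, so netAvail = 12, and the queue holds withdrawals of 5 (wallet A), 4 (wallet B) and 6 (wallet A):

// 5 (A): 5 <= 12, first request -> batch wallet = A, withdrawalAmt = 5, allowed
// 4 (B): fits, but wallet B != A -> skipped until the current batch completes
// 6 (A): 5 + 6 = 11 <= 12        -> allowed, withdrawalAmt = 11
//
// Result: a single WithdrawBalance message for 11 from wallet A; the wallet B
// request is processed on a later invocation.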

// asynchronously wait for results of message
func (a *fundedAddress) startWaitForResults(msgCid cid.Cid) {
	go func() {
		err := a.env.WaitMsg(a.ctx, msgCid)
		if err != nil {
			// We don't really care about the results here, we're just waiting
			// so as to only process one on-chain message at a time
			log.Errorf("waiting for results of message %s for addr %s: %s", msgCid, a.state.Addr, err)
		}

		a.lk.Lock()
		a.debugf("complete wait")
		a.clearWaitState()
		a.lk.Unlock()

		a.process()
	}()
}

func (a *fundedAddress) debugf(args ...interface{}) {
	fmtStr := args[0].(string)
	args = args[1:]
	log.Debugf(a.state.Addr.String()+": "+fmtStr, args...)
}

// The result of a fund request
type reqResult struct {
	msgCid cid.Cid
	err    error
}

// A request to change funds
type fundRequest struct {
	ctx       context.Context
	amt       abi.TokenAmount
	completed chan struct{}
	Wallet    address.Address
	Result    chan reqResult
}

func newFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount) *fundRequest {
	return &fundRequest{
		ctx:       ctx,
		amt:       amt,
		Wallet:    wallet,
		Result:    make(chan reqResult),
		completed: make(chan struct{}),
	}
}

// Amount returns zero if the context has expired
func (frp *fundRequest) Amount() abi.TokenAmount {
	if frp.ctx.Err() != nil {
		return abi.NewTokenAmount(0)
	}
	return frp.amt
}

// Complete is called with the message CID when the funds request has been
// started, or with the error if there was one
func (frp *fundRequest) Complete(msgCid cid.Cid, err error) {
	select {
	case <-frp.completed:
	case <-frp.ctx.Done():
	case frp.Result <- reqResult{msgCid: msgCid, err: err}:
	}
	close(frp.completed)
}

// Completed indicates if Complete has already been called
func (frp *fundRequest) Completed() bool {
	select {
	case <-frp.completed:
		return true
	default:
		return false
	}
}
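
For orientation, a sketch of how a package-internal caller might drive this request/response flow; the helper name and enqueue hook below are invented for illustration (the real entry points are the fund manager's Reserve/Withdraw paths):

// Hypothetical helper showing the fundRequest lifecycle end to end.
func awaitFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount, enqueue func(*fundRequest)) (cid.Cid, error) {
	req := newFundRequest(ctx, wallet, amt)
	enqueue(req) // hand the request to the queue drained by processReservations / processWithdrawals

	select {
	case res := <-req.Result: // delivered by req.Complete(...)
		return res.msgCid, res.err
	case <-ctx.Done():
		return cid.Undef, ctx.Err()
	}
}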

// fundManagerEnvironment simplifies some API calls
type fundManagerEnvironment struct {
	api fundManagerAPI
}

func (env *fundManagerEnvironment) AvailableFunds(ctx context.Context, addr address.Address) (abi.TokenAmount, error) {
	bal, err := env.api.StateMarketBalance(ctx, addr, types.EmptyTSK)
	if err != nil {
		return abi.NewTokenAmount(0), err
	}

	return types.BigSub(bal.Escrow, bal.Locked), nil
}

func (env *fundManagerEnvironment) AddFunds(
	ctx context.Context,
	wallet address.Address,
	addr address.Address,
	amt abi.TokenAmount,
) (cid.Cid, error) {
	params, err := actors.SerializeParams(&addr)
	if err != nil {
		return cid.Undef, err
	}

	smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{
		To:     market.Address,
		From:   wallet,
		Value:  amt,
		Method: market.Methods.AddBalance,
		Params: params,
	}, nil)

	if aerr != nil {
		return cid.Undef, aerr
	}

	return smsg.Cid(), nil
}

func (env *fundManagerEnvironment) WithdrawFunds(
	ctx context.Context,
	wallet address.Address,
	addr address.Address,
	amt abi.TokenAmount,
) (cid.Cid, error) {
	params, err := actors.SerializeParams(&market.WithdrawBalanceParams{
		ProviderOrClientAddress: addr,
		Amount:                  amt,
	})
	if err != nil {
		return cid.Undef, xerrors.Errorf("serializing params: %w", err)
	}

	smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{
		To:     market.Address,
		From:   wallet,
		Value:  types.NewInt(0),
		Method: market.Methods.WithdrawBalance,
		Params: params,
	}, nil)

	if aerr != nil {
		return cid.Undef, aerr
	}

	return smsg.Cid(), nil
}
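
Note the asymmetry between the two message shapes, which follows directly from the constructors above: AddBalance carries the deposit in the message Value and just the beneficiary address in Params, while WithdrawBalance sends zero Value and encodes both the escrow address and the amount in Params:

// AddBalance:      Value = amt,             Params = CBOR(addr)
// WithdrawBalance: Value = types.NewInt(0), Params = CBOR(WithdrawBalanceParams{addr, amt})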

func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error {
	_, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence)
	return err
}
820 chain/market/fundmanager_test.go Normal file
@ -0,0 +1,820 @@
package market

import (
	"bytes"
	"context"
	"sync"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"
	"github.com/stretchr/testify/require"
)

// TestFundManagerBasic verifies that the basic fund manager operations work
func TestFundManagerBasic(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	// Reserve 10
	// balance: 0 -> 10
	// reserved: 0 -> 10
	amt := abi.NewTokenAmount(10)
	sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	msg := s.mockApi.getSentMessage(sentinel)
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)

	s.mockApi.completeMsg(sentinel)

	// Reserve 7
	// balance: 10 -> 17
	// reserved: 10 -> 17
	amt = abi.NewTokenAmount(7)
	sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	msg = s.mockApi.getSentMessage(sentinel)
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)

	s.mockApi.completeMsg(sentinel)

	// Release 5
	// balance: 17
	// reserved: 17 -> 12
	amt = abi.NewTokenAmount(5)
	err = s.fm.Release(s.acctAddr, amt)
	require.NoError(t, err)

	// Withdraw 2
	// balance: 17 -> 15
	// reserved: 12
	amt = abi.NewTokenAmount(2)
	sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	msg = s.mockApi.getSentMessage(sentinel)
	checkWithdrawMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)

	s.mockApi.completeMsg(sentinel)

	// Reserve 3
	// balance: 15
	// reserved: 12 -> 15
	// Note: reserved (15) is <= balance (15) so should not send on-chain
	// message
	msgCount := s.mockApi.messageCount()
	amt = abi.NewTokenAmount(3)
	sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)
	require.Equal(t, msgCount, s.mockApi.messageCount())
	require.Equal(t, sentinel, cid.Undef)

	// Reserve 1
	// balance: 15 -> 16
	// reserved: 15 -> 16
	// Note: reserved (16) is above balance (15) so *should* send on-chain
	// message to top up balance
	amt = abi.NewTokenAmount(1)
	topUp := abi.NewTokenAmount(1)
	sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	s.mockApi.completeMsg(sentinel)
	msg = s.mockApi.getSentMessage(sentinel)
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, topUp)

	// Withdraw 1
	// balance: 16
	// reserved: 16
	// Note: Expect failure because there is no available balance to withdraw:
	// balance - reserved = 16 - 16 = 0
	amt = abi.NewTokenAmount(1)
	sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.Error(t, err)
}

// TestFundManagerParallel verifies that operations can be run in parallel
func TestFundManagerParallel(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	// Reserve 10
	amt := abi.NewTokenAmount(10)
	sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	// Wait until all the subsequent requests are queued up
	queueReady := make(chan struct{})
	fa := s.fm.getFundedAddress(s.acctAddr)
	fa.onProcessStart(func() bool {
		if len(fa.withdrawals) == 1 && len(fa.reservations) == 2 && len(fa.releases) == 1 {
			close(queueReady)
			return true
		}
		return false
	})

	// Withdraw 5 (should not run until after reserves / releases)
	withdrawReady := make(chan error)
	go func() {
		amt = abi.NewTokenAmount(5)
		_, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
		withdrawReady <- err
	}()

	reserveSentinels := make(chan cid.Cid)

	// Reserve 3
	go func() {
		amt := abi.NewTokenAmount(3)
		sentinelReserve3, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
		require.NoError(t, err)
		reserveSentinels <- sentinelReserve3
	}()

	// Reserve 5
	go func() {
		amt := abi.NewTokenAmount(5)
		sentinelReserve5, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
		require.NoError(t, err)
		reserveSentinels <- sentinelReserve5
	}()

	// Release 2
	go func() {
		amt := abi.NewTokenAmount(2)
		err = s.fm.Release(s.acctAddr, amt)
		require.NoError(t, err)
	}()

	// Everything is queued up
	<-queueReady

	// Complete the "Reserve 10" message
	s.mockApi.completeMsg(sentinelReserve10)
	msg := s.mockApi.getSentMessage(sentinelReserve10)
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(10))

	// The other requests should now be combined and be submitted on-chain as
	// a single message
	rs1 := <-reserveSentinels
	rs2 := <-reserveSentinels
	require.Equal(t, rs1, rs2)

	// Withdraw should not have been called yet, because reserve / release
	// requests run first
	select {
	case <-withdrawReady:
		require.Fail(t, "Withdraw should run after reserve / release")
	default:
	}

	// Complete the message
	s.mockApi.completeMsg(rs1)
	msg = s.mockApi.getSentMessage(rs1)

	// "Reserve 3" +3
	// "Reserve 5" +5
	// "Release 2" -2
	// Result: 6
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(6))

	// Expect withdraw to fail because not enough available funds
	err = <-withdrawReady
	require.Error(t, err)
}

// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet
func TestFundManagerReserveByWallet(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
	require.NoError(t, err)
	walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
	require.NoError(t, err)

	// Wait until all the reservation requests are queued up
	walletAQueuedUp := make(chan struct{})
	queueReady := make(chan struct{})
	fa := s.fm.getFundedAddress(s.acctAddr)
	fa.onProcessStart(func() bool {
		if len(fa.reservations) == 1 {
			close(walletAQueuedUp)
		}
		if len(fa.reservations) == 3 {
			close(queueReady)
			return true
		}
		return false
	})

	type reserveResult struct {
		ws  cid.Cid
		err error
	}
	results := make(chan *reserveResult)

	amtA1 := abi.NewTokenAmount(1)
	go func() {
		// Wallet A: Reserve 1
		sentinelA1, err := s.fm.Reserve(s.ctx, walletAddrA, s.acctAddr, amtA1)
		results <- &reserveResult{
			ws:  sentinelA1,
			err: err,
		}
	}()

	amtB1 := abi.NewTokenAmount(2)
	amtB2 := abi.NewTokenAmount(3)
	go func() {
		// Wait for reservation for wallet A to be queued up
		<-walletAQueuedUp

		// Wallet B: Reserve 2
		go func() {
			sentinelB1, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB1)
			results <- &reserveResult{
				ws:  sentinelB1,
				err: err,
			}
		}()

		// Wallet B: Reserve 3
		sentinelB2, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB2)
		results <- &reserveResult{
			ws:  sentinelB2,
			err: err,
		}
	}()

	// All reservation requests are queued up
	<-queueReady

	resA := <-results
	sentinelA1 := resA.ws

	// Should send to wallet A
	msg := s.mockApi.getSentMessage(sentinelA1)
	checkAddMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1)

	// Complete wallet A message
	s.mockApi.completeMsg(sentinelA1)

	resB1 := <-results
	resB2 := <-results
	require.NoError(t, resB1.err)
	require.NoError(t, resB2.err)
	sentinelB1 := resB1.ws
	sentinelB2 := resB2.ws

	// Should send a different message to wallet B
	require.NotEqual(t, sentinelA1, sentinelB1)
	// Should be a single message combining amounts B1 and B2
	require.Equal(t, sentinelB1, sentinelB2)
	msg = s.mockApi.getSentMessage(sentinelB1)
	checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2))
}

// TestFundManagerWithdrawalLimit verifies that as many withdraw operations as
// possible are processed
func TestFundManagerWithdrawalLimit(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	// Reserve 10
	amt := abi.NewTokenAmount(10)
	sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	// Complete the "Reserve 10" message
	s.mockApi.completeMsg(sentinelReserve10)

	// Release 10
	err = s.fm.Release(s.acctAddr, amt)
	require.NoError(t, err)

	// Queue up withdraw requests
	queueReady := make(chan struct{})
	fa := s.fm.getFundedAddress(s.acctAddr)
	withdrawalReqTotal := 3
	withdrawalReqEnqueued := 0
	withdrawalReqQueue := make(chan func(), withdrawalReqTotal)
	fa.onProcessStart(func() bool {
		// If a new withdrawal request was enqueued
		if len(fa.withdrawals) > withdrawalReqEnqueued {
			withdrawalReqEnqueued++

			// Pop the next request and run it
			select {
			case fn := <-withdrawalReqQueue:
				go fn()
			default:
			}
		}
		// Once all the requests have arrived, we're ready to process the queue
		if withdrawalReqEnqueued == withdrawalReqTotal {
			close(queueReady)
			return true
		}
		return false
	})

	type withdrawResult struct {
		reqIndex int
		ws       cid.Cid
		err      error
	}
	withdrawRes := make(chan *withdrawResult)

	// Queue up three "Withdraw 5" requests
	enqueuedCount := 0
	for i := 0; i < withdrawalReqTotal; i++ {
		withdrawalReqQueue <- func() {
			idx := enqueuedCount
			enqueuedCount++

			amt := abi.NewTokenAmount(5)
			ws, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
			withdrawRes <- &withdrawResult{reqIndex: idx, ws: ws, err: err}
		}
	}
	// Start the first request
	fn := <-withdrawalReqQueue
	go fn()

	// All withdrawal requests are queued up and ready to be processed
	<-queueReady

	// Organize results in request order
	results := make([]*withdrawResult, withdrawalReqTotal)
	for i := 0; i < 3; i++ {
		res := <-withdrawRes
		results[res.reqIndex] = res
	}

	// Available 10
	// Withdraw 5
	// Expect Success
	require.NoError(t, results[0].err)
	// Available 5
	// Withdraw 5
	// Expect Success
	require.NoError(t, results[1].err)
	// Available 0
	// Withdraw 5
	// Expect FAIL
	require.Error(t, results[2].err)

	// Expect withdrawal requests that fit under the available amount to be
	// combined into a single message on-chain
	require.Equal(t, results[0].ws, results[1].ws)
}

// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet
func TestFundManagerWithdrawByWallet(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
	require.NoError(t, err)
	walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
	require.NoError(t, err)

	// Reserve 10
	reserveAmt := abi.NewTokenAmount(10)
	sentinelReserve, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, reserveAmt)
	require.NoError(t, err)
	s.mockApi.completeMsg(sentinelReserve)

	time.Sleep(10 * time.Millisecond)

	// Release 10
	err = s.fm.Release(s.acctAddr, reserveAmt)
	require.NoError(t, err)

	type withdrawResult struct {
		ws  cid.Cid
		err error
	}
	results := make(chan *withdrawResult)

	// Wait until withdrawals are queued up
	walletAQueuedUp := make(chan struct{})
	queueReady := make(chan struct{})
	withdrawalCount := 0
	fa := s.fm.getFundedAddress(s.acctAddr)
	fa.onProcessStart(func() bool {
		if len(fa.withdrawals) == withdrawalCount {
			return false
		}
		withdrawalCount = len(fa.withdrawals)

		if withdrawalCount == 1 {
			close(walletAQueuedUp)
		} else if withdrawalCount == 3 {
			close(queueReady)
			return true
		}
		return false
	})

	amtA1 := abi.NewTokenAmount(1)
	go func() {
		// Wallet A: Withdraw 1
		sentinelA1, err := s.fm.Withdraw(s.ctx, walletAddrA, s.acctAddr, amtA1)
		results <- &withdrawResult{
			ws:  sentinelA1,
			err: err,
		}
	}()

	amtB1 := abi.NewTokenAmount(2)
	amtB2 := abi.NewTokenAmount(3)
	go func() {
		// Wait until withdraw for wallet A is queued up
		<-walletAQueuedUp

		// Wallet B: Withdraw 2
		go func() {
			sentinelB1, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB1)
			results <- &withdrawResult{
				ws:  sentinelB1,
				err: err,
			}
		}()

		// Wallet B: Withdraw 3
		sentinelB2, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB2)
		results <- &withdrawResult{
			ws:  sentinelB2,
			err: err,
		}
	}()

	// Withdrawals are queued up
	<-queueReady

	// Should withdraw from wallet A first
	resA1 := <-results
	sentinelA1 := resA1.ws
	msg := s.mockApi.getSentMessage(sentinelA1)
	checkWithdrawMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1)

	// Complete wallet A message
	s.mockApi.completeMsg(sentinelA1)

	resB1 := <-results
	resB2 := <-results
	require.NoError(t, resB1.err)
	require.NoError(t, resB2.err)
	sentinelB1 := resB1.ws
	sentinelB2 := resB2.ws

	// Should send a different message for wallet B than for wallet A
	require.NotEqual(t, sentinelA1, sentinelB1)
	// Should be a single message combining amounts B1 and B2
	require.Equal(t, sentinelB1, sentinelB2)
	msg = s.mockApi.getSentMessage(sentinelB1)
	checkWithdrawMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2))
}

// TestFundManagerRestart verifies that waiting for incomplete requests resumes
// on restart
func TestFundManagerRestart(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	acctAddr2 := tutils.NewActorAddr(t, "addr2")

	// Address 1: Reserve 10
	amt := abi.NewTokenAmount(10)
	sentinelAddr1, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)

	msg := s.mockApi.getSentMessage(sentinelAddr1)
	checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)

	// Address 2: Reserve 7
	amt2 := abi.NewTokenAmount(7)
	sentinelAddr2Res7, err := s.fm.Reserve(s.ctx, s.walletAddr, acctAddr2, amt2)
	require.NoError(t, err)

	msg2 := s.mockApi.getSentMessage(sentinelAddr2Res7)
	checkAddMessageFields(t, msg2, s.walletAddr, acctAddr2, amt2)

	// Complete "Address 1: Reserve 10"
	s.mockApi.completeMsg(sentinelAddr1)

	// Give the completed state a moment to be stored before restart
	time.Sleep(time.Millisecond * 10)

	// Restart
	mockApiAfter := s.mockApi
	fmAfter := newFundManager(mockApiAfter, s.ds)
	err = fmAfter.Start()
	require.NoError(t, err)

	amt3 := abi.NewTokenAmount(9)
	reserveSentinel := make(chan cid.Cid)
	go func() {
		// Address 2: Reserve 9
		sentinel3, err := fmAfter.Reserve(s.ctx, s.walletAddr, acctAddr2, amt3)
		require.NoError(t, err)
		reserveSentinel <- sentinel3
	}()

	// Expect no message to be sent, because still waiting for previous
	// message "Address 2: Reserve 7" to complete on-chain
	select {
	case <-reserveSentinel:
		require.Fail(t, "Expected no message to be sent")
	case <-time.After(10 * time.Millisecond):
	}

	// Complete "Address 2: Reserve 7"
	mockApiAfter.completeMsg(sentinelAddr2Res7)

	// Expect waiting message to now be sent
	sentinel3 := <-reserveSentinel
	msg3 := mockApiAfter.getSentMessage(sentinel3)
	checkAddMessageFields(t, msg3, s.walletAddr, acctAddr2, amt3)
}

// TestFundManagerReleaseAfterPublish verifies that release is successful in
// the following scenario:
// 1. Deal A adds 5 to addr1: reserved 0 -> 5, available 0 -> 5
// 2. Deal B adds 7 to addr1: reserved 5 -> 12, available 5 -> 12
// 3. Deal B completes, reducing addr1 by 7: reserved 12, available 12 -> 5
// 4. Deal A releases 5 from addr1: reserved 12 -> 7, available 5
func TestFundManagerReleaseAfterPublish(t *testing.T) {
	s := setup(t)
	defer s.fm.Stop()

	// Deal A: Reserve 5
	// balance: 0 -> 5
	// reserved: 0 -> 5
	amt := abi.NewTokenAmount(5)
	sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)
	s.mockApi.completeMsg(sentinel)

	// Deal B: Reserve 7
	// balance: 5 -> 12
	// reserved: 5 -> 12
	amt = abi.NewTokenAmount(7)
	sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
	require.NoError(t, err)
	s.mockApi.completeMsg(sentinel)

	// Deal B: Publish (removes Deal B amount from balance)
	// balance: 12 -> 5
	// reserved: 12
	amt = abi.NewTokenAmount(7)
	s.mockApi.publish(s.acctAddr, amt)

	// Deal A: Release 5
	// balance: 5
	// reserved: 12 -> 7
	amt = abi.NewTokenAmount(5)
	err = s.fm.Release(s.acctAddr, amt)
	require.NoError(t, err)

	// Deal B: Release 7
	// balance: 5
	// reserved: 7 -> 0
	amt = abi.NewTokenAmount(7)
	err = s.fm.Release(s.acctAddr, amt)
	require.NoError(t, err)
}

type scaffold struct {
	ctx        context.Context
	ds         *ds_sync.MutexDatastore
	wllt       *wallet.LocalWallet
	walletAddr address.Address
	acctAddr   address.Address
	mockApi    *mockFundManagerAPI
	fm         *FundManager
}

func setup(t *testing.T) *scaffold {
	ctx := context.Background()

	wllt, err := wallet.NewWallet(wallet.NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	walletAddr, err := wllt.WalletNew(context.Background(), types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	acctAddr := tutils.NewActorAddr(t, "addr")

	mockApi := newMockFundManagerAPI(walletAddr)
	dstore := ds_sync.MutexWrap(ds.NewMapDatastore())
	fm := newFundManager(mockApi, dstore)
	return &scaffold{
		ctx:        ctx,
		ds:         dstore,
		wllt:       wllt,
		walletAddr: walletAddr,
		acctAddr:   acctAddr,
		mockApi:    mockApi,
		fm:         fm,
	}
}

func checkAddMessageFields(t *testing.T, msg *types.Message, from address.Address, to address.Address, amt abi.TokenAmount) {
	require.Equal(t, from, msg.From)
	require.Equal(t, market.Address, msg.To)
	require.Equal(t, amt, msg.Value)

	var paramsTo address.Address
	err := paramsTo.UnmarshalCBOR(bytes.NewReader(msg.Params))
	require.NoError(t, err)
	require.Equal(t, to, paramsTo)
}

func checkWithdrawMessageFields(t *testing.T, msg *types.Message, from address.Address, addr address.Address, amt abi.TokenAmount) {
	require.Equal(t, from, msg.From)
	require.Equal(t, market.Address, msg.To)
	require.Equal(t, abi.NewTokenAmount(0), msg.Value)

	var params market.WithdrawBalanceParams
	err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
	require.NoError(t, err)
	require.Equal(t, addr, params.ProviderOrClientAddress)
	require.Equal(t, amt, params.Amount)
}

type sentMsg struct {
	msg   *types.SignedMessage
	ready chan struct{}
}

type mockFundManagerAPI struct {
	wallet address.Address

	lk            sync.Mutex
	escrow        map[address.Address]abi.TokenAmount
	sentMsgs      map[cid.Cid]*sentMsg
	completedMsgs map[cid.Cid]struct{}
	waitingFor    map[cid.Cid]chan struct{}
}

func newMockFundManagerAPI(wallet address.Address) *mockFundManagerAPI {
	return &mockFundManagerAPI{
		wallet:        wallet,
		escrow:        make(map[address.Address]abi.TokenAmount),
		sentMsgs:      make(map[cid.Cid]*sentMsg),
		completedMsgs: make(map[cid.Cid]struct{}),
		waitingFor:    make(map[cid.Cid]chan struct{}),
	}
}

func (mapi *mockFundManagerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	mapi.lk.Lock()
	defer mapi.lk.Unlock()

	smsg := &types.SignedMessage{Message: *message}
	mapi.sentMsgs[smsg.Cid()] = &sentMsg{msg: smsg, ready: make(chan struct{})}

	return smsg, nil
}

func (mapi *mockFundManagerAPI) getSentMessage(c cid.Cid) *types.Message {
	mapi.lk.Lock()
	defer mapi.lk.Unlock()

	for i := 0; i < 1000; i++ {
		if pending, ok := mapi.sentMsgs[c]; ok {
			return &pending.msg.Message
		}
		time.Sleep(time.Millisecond)
	}
	panic("expected message to be sent")
}

func (mapi *mockFundManagerAPI) messageCount() int {
	mapi.lk.Lock()
	defer mapi.lk.Unlock()

	return len(mapi.sentMsgs)
}

func (mapi *mockFundManagerAPI) completeMsg(msgCid cid.Cid) {
	mapi.lk.Lock()

	pmsg, ok := mapi.sentMsgs[msgCid]
	if ok {
		if pmsg.msg.Message.Method == market.Methods.AddBalance {
			var escrowAcct address.Address
			err := escrowAcct.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params))
			if err != nil {
				panic(err)
			}

			escrow := mapi.getEscrow(escrowAcct)
			before := escrow
			escrow = types.BigAdd(escrow, pmsg.msg.Message.Value)
			mapi.escrow[escrowAcct] = escrow
			log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow)
		} else {
			var params market.WithdrawBalanceParams
			err := params.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params))
			if err != nil {
				panic(err)
			}
			escrowAcct := params.ProviderOrClientAddress

			escrow := mapi.getEscrow(escrowAcct)
			before := escrow
			escrow = types.BigSub(escrow, params.Amount)
			mapi.escrow[escrowAcct] = escrow
			log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow)
		}
	}

	mapi.completedMsgs[msgCid] = struct{}{}

	ready, ok := mapi.waitingFor[msgCid]

	mapi.lk.Unlock()

	if ok {
		close(ready)
	}
}

func (mapi *mockFundManagerAPI) StateMarketBalance(ctx context.Context, a address.Address, key types.TipSetKey) (api.MarketBalance, error) {
	mapi.lk.Lock()
	defer mapi.lk.Unlock()

	return api.MarketBalance{
		Locked: abi.NewTokenAmount(0),
		Escrow: mapi.getEscrow(a),
	}, nil
}

func (mapi *mockFundManagerAPI) getEscrow(a address.Address) abi.TokenAmount {
	escrow := mapi.escrow[a]
	if escrow.Nil() {
		return abi.NewTokenAmount(0)
	}
	return escrow
}

func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmount) {
	mapi.lk.Lock()
	defer mapi.lk.Unlock()

	escrow := mapi.escrow[addr]
	if escrow.Nil() {
		return
	}
	escrow = types.BigSub(escrow, amt)
	if escrow.LessThan(abi.NewTokenAmount(0)) {
		escrow = abi.NewTokenAmount(0)
	}
	mapi.escrow[addr] = escrow
}

func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64) (*api.MsgLookup, error) {
	res := &api.MsgLookup{
		Message: c,
		Receipt: types.MessageReceipt{
			ExitCode: 0,
			Return:   nil,
		},
	}
	ready := make(chan struct{})

	mapi.lk.Lock()
	_, ok := mapi.completedMsgs[c]
	if !ok {
		mapi.waitingFor[c] = ready
	}
	mapi.lk.Unlock()

	if !ok {
		select {
		case <-ctx.Done():
		case <-ready:
		}
	}
	return res, nil
}
@ -1,166 +0,0 @@
package market

import (
	"context"
	"sync"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log"
	"go.uber.org/fx"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/events/state"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl/full"
)

var log = logging.Logger("market_adapter")

// API is the set of dependencies needed to run a fund manager
type API struct {
	fx.In

	full.ChainAPI
	full.StateAPI
	full.MpoolAPI
}

// FundMgr monitors available balances and adds funds when EnsureAvailable is called
type FundMgr struct {
	api fundMgrAPI

	lk        sync.RWMutex
	available map[address.Address]types.BigInt
}

// StartFundManager creates a new fund manager and sets up event hooks to manage state changes
func StartFundManager(lc fx.Lifecycle, api API) *FundMgr {
	fm := newFundMgr(&api)
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			ev := events.NewEvents(ctx, &api)
			preds := state.NewStatePredicates(&api)
			dealDiffFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(fm.getAddresses)))
			match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
				return dealDiffFn(ctx, oldTs.Key(), newTs.Key())
			}
			return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match)
		},
	})
	return fm
}

type fundMgrAPI interface {
	StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
	MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
	StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
}

func newFundMgr(api fundMgrAPI) *FundMgr {
	return &FundMgr{
		api:       api,
		available: map[address.Address]types.BigInt{},
	}
}

// checkFunc tells the events api to simply proceed (we always want to watch)
func (fm *FundMgr) checkFunc(ts *types.TipSet) (done bool, more bool, err error) {
	return false, true, nil
}

// revert handles reverts to balances
func (fm *FundMgr) revert(ctx context.Context, ts *types.TipSet) error {
	// TODO: Is it ok to just ignore this?
	log.Warn("balance change reverted; TODO: actually handle this!")
	return nil
}

// stateChanged handles balance changes monitored on the chain from one tipset to the next
func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) {
	changedBalances, ok := states.(state.ChangedBalances)
	if !ok {
		panic("Expected state.ChangedBalances")
	}
	// overwrite our in memory cache with new values from chain (chain is canonical)
	fm.lk.Lock()
	for addr, balanceChange := range changedBalances {
		if fm.available[addr].Int != nil {
			log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String())
		}

		fm.available[addr] = balanceChange.To
	}
	fm.lk.Unlock()
	return true, nil
}

func (fm *FundMgr) getAddresses() []address.Address {
	fm.lk.RLock()
	defer fm.lk.RUnlock()
	addrs := make([]address.Address, 0, len(fm.available))
	for addr := range fm.available {
		addrs = append(addrs, addr)
	}
	return addrs
}

// EnsureAvailable looks at the available balance in escrow for a given
// address, and if less than the passed-in amount, adds the difference
func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
	idAddr, err := fm.api.StateLookupID(ctx, addr, types.EmptyTSK)
	if err != nil {
		return cid.Undef, err
	}
	fm.lk.Lock()
	defer fm.lk.Unlock()

	bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK)
	if err != nil {
		return cid.Undef, err
	}

	stateAvail := types.BigSub(bal.Escrow, bal.Locked)

	avail, ok := fm.available[idAddr]
	if !ok {
		avail = stateAvail
	}

	toAdd := types.BigSub(amt, avail)
	if toAdd.LessThan(types.NewInt(0)) {
		toAdd = types.NewInt(0)
	}
	fm.available[idAddr] = big.Add(avail, toAdd)

	log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", avail.String(), stateAvail.String(), amt.String(), toAdd.String())

	if toAdd.LessThanEqual(big.Zero()) {
		return cid.Undef, nil
	}

	params, err := actors.SerializeParams(&addr)
	if err != nil {
		fm.available[idAddr] = avail
		return cid.Undef, err
	}

	smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{
		To:     market.Address,
		From:   wallet,
		Value:  toAdd,
		Method: market.Methods.AddBalance,
		Params: params,
	}, nil)
	if err != nil {
		fm.available[idAddr] = avail
		return cid.Undef, err
	}

	return smsg.Cid(), nil
}
@ -1,199 +0,0 @@
package market

import (
	"context"
	"errors"
	"math/rand"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"

	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
)

type fakeAPI struct {
	returnedBalance    api.MarketBalance
	returnedBalanceErr error
	signature          crypto.Signature
	receivedMessage    *types.Message
	pushMessageErr     error
	lookupIDErr        error
}

func (fapi *fakeAPI) StateLookupID(_ context.Context, addr address.Address, _ types.TipSetKey) (address.Address, error) {
	return addr, fapi.lookupIDErr
}
func (fapi *fakeAPI) StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) {
	return fapi.returnedBalance, fapi.returnedBalanceErr
}

func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	fapi.receivedMessage = msg
	return &types.SignedMessage{
		Message:   *msg,
		Signature: fapi.signature,
	}, fapi.pushMessageErr
}

func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message {
	params, _ := actors.SerializeParams(&addr)
	return &types.Message{
		To:     market.Address,
		From:   wallet,
		Value:  toAdd,
		Method: market.Methods.AddBalance,
		Params: params,
	}
}

type expectedResult struct {
	addAmt          abi.TokenAmount
	shouldAdd       bool
	err             error
	cachedAvailable abi.TokenAmount
}

func TestAddFunds(t *testing.T) {
	ctx := context.Background()
	testCases := map[string]struct {
		returnedBalanceErr error
		returnedBalance    api.MarketBalance
		addAmounts         []abi.TokenAmount
		pushMessageErr     error
		expectedResults    []expectedResult
		lookupIDErr        error
	}{
		"succeeds, trivial case": {
			returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)},
			addAmounts:      []abi.TokenAmount{abi.NewTokenAmount(100)},
			expectedResults: []expectedResult{
				{
					addAmt:    abi.NewTokenAmount(100),
					shouldAdd: true,
					err:       nil,
				},
			},
		},
		"succeeds, money already present": {
			returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)},
			addAmounts:      []abi.TokenAmount{abi.NewTokenAmount(100)},
			expectedResults: []expectedResult{
				{
					shouldAdd:       false,
					err:             nil,
					cachedAvailable: abi.NewTokenAmount(100),
				},
			},
		},
		"succeeds, multiple adds": {
			returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)},
			addAmounts:      []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(200), abi.NewTokenAmount(250), abi.NewTokenAmount(250)},
			expectedResults: []expectedResult{
				{
					shouldAdd: false,
					err:       nil,
				},
				{
					addAmt:          abi.NewTokenAmount(100),
					shouldAdd:       true,
					err:             nil,
					cachedAvailable: abi.NewTokenAmount(200),
				},
				{
					addAmt:          abi.NewTokenAmount(50),
					shouldAdd:       true,
					err:             nil,
					cachedAvailable: abi.NewTokenAmount(250),
				},
				{
					shouldAdd:       false,
					err:             nil,
					cachedAvailable: abi.NewTokenAmount(250),
				},
			},
		},
		"error on market balance": {
			returnedBalanceErr: errors.New("something went wrong"),
			addAmounts:         []abi.TokenAmount{abi.NewTokenAmount(100)},
			expectedResults: []expectedResult{
				{
					err: errors.New("something went wrong"),
				},
			},
		},
		"error on push message": {
			returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)},
			pushMessageErr:  errors.New("something went wrong"),
			addAmounts:      []abi.TokenAmount{abi.NewTokenAmount(100)},
			expectedResults: []expectedResult{
				{
					err:             errors.New("something went wrong"),
					cachedAvailable: abi.NewTokenAmount(0),
				},
			},
		},
		"error looking up address": {
			lookupIDErr: errors.New("something went wrong"),
			addAmounts:  []abi.TokenAmount{abi.NewTokenAmount(100)},
			expectedResults: []expectedResult{
				{
					err: errors.New("something went wrong"),
				},
			},
		},
	}

	for testCase, data := range testCases {
		//nolint:scopelint
		t.Run(testCase, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			sig := make([]byte, 100)
			_, err := rand.Read(sig)
			require.NoError(t, err)
			fapi := &fakeAPI{
				returnedBalance:    data.returnedBalance,
				returnedBalanceErr: data.returnedBalanceErr,
				signature: crypto.Signature{
					Type: crypto.SigTypeUnknown,
					Data: sig,
				},
				pushMessageErr: data.pushMessageErr,
				lookupIDErr:    data.lookupIDErr,
			}
			fundMgr := newFundMgr(fapi)
			addr := tutils.NewIDAddr(t, uint64(rand.Uint32()))
			wallet := tutils.NewIDAddr(t, uint64(rand.Uint32()))
			for i, amount := range data.addAmounts {
				fapi.receivedMessage = nil
				_, err := fundMgr.EnsureAvailable(ctx, addr, wallet, amount)
				expected := data.expectedResults[i]
				if expected.err == nil {
					require.NoError(t, err)
					if expected.shouldAdd {
						expectedMessage := addFundsMsg(expected.addAmt, addr, wallet)
						require.Equal(t, expectedMessage, fapi.receivedMessage)
					} else {
						require.Nil(t, fapi.receivedMessage)
					}
				} else {
					require.EqualError(t, err, expected.err.Error())
				}

				if !expected.cachedAvailable.Nil() {
					require.Equal(t, expected.cachedAvailable, fundMgr.available[addr])
				}
			}
		})
	}
}
90 chain/market/store.go Normal file
@ -0,0 +1,90 @@
package market

import (
	"bytes"

	cborrpc "github.com/filecoin-project/go-cbor-util"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	dsq "github.com/ipfs/go-datastore/query"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

const dsKeyAddr = "Addr"

type Store struct {
	ds datastore.Batching
}

func newStore(ds dtypes.MetadataDS) *Store {
	ds = namespace.Wrap(ds, datastore.NewKey("/fundmgr/"))
	return &Store{
		ds: ds,
	}
}

// save the state to the datastore
func (ps *Store) save(state *FundedAddressState) error {
	k := dskeyForAddr(state.Addr)

	b, err := cborrpc.Dump(state)
	if err != nil {
		return err
	}

	return ps.ds.Put(k, b)
}

// get the state for the given address
func (ps *Store) get(addr address.Address) (*FundedAddressState, error) {
	k := dskeyForAddr(addr)

	data, err := ps.ds.Get(k)
	if err != nil {
		return nil, err
	}

	var state FundedAddressState
	err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
	if err != nil {
		return nil, err
	}
	return &state, nil
}

// forEach calls iter with each address state in the datastore
func (ps *Store) forEach(iter func(*FundedAddressState)) error {
	res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr})
	if err != nil {
		return err
	}
	defer res.Close() //nolint:errcheck

	for {
		res, ok := res.NextSync()
		if !ok {
			break
		}

		if res.Error != nil {
			return res.Error
		}

		var stored FundedAddressState
		if err := stored.UnmarshalCBOR(bytes.NewReader(res.Value)); err != nil {
			return err
		}

		iter(&stored)
	}

	return nil
}

// The datastore key used to identify the address state
func dskeyForAddr(addr address.Address) datastore.Key {
	return datastore.KeyWithNamespaces([]string{dsKeyAddr, addr.String()})
}
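
To illustrate the key layout produced by newStore and dskeyForAddr (the actor address below is hypothetical):

// dskeyForAddr(t0100)               -> /Addr/t0100
// after namespace.Wrap in newStore  -> /fundmgr/Addr/t0100 (as stored in the metadata datastore)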
@ -264,7 +264,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
	}

	if strict && nonceGap {
		log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
		log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
			m.Message.From, m.Message.Nonce, nextNonce)
	}

@ -465,7 +465,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
	epoch := curTs.Height()
	minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
		return false, xerrors.Errorf("message will not be included in a block: %w", err)
	}

@ -546,7 +546,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
	}

	// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
	if err := m.Message.ValidForBlockInclusion(0); err != nil {
	if err := m.Message.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
		return xerrors.Errorf("message not valid for block inclusion: %w", err)
	}

@ -1219,7 +1219,7 @@ func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.Si
	if smsg != nil {
		out = append(out, smsg)
	} else {
		log.Warnf("could not recover signature for bls message %s", msg.Cid())
		log.Debugf("could not recover signature for bls message %s", msg.Cid())
	}
	}
}
@ -2,6 +2,7 @@ package messagepool

import (
	"context"
	"time"

	"github.com/ipfs/go-cid"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
@ -9,9 +10,16 @@ import (

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

var (
	HeadChangeCoalesceMinDelay      = 2 * time.Second
	HeadChangeCoalesceMaxDelay      = 6 * time.Second
	HeadChangeCoalesceMergeInterval = time.Second
)

type Provider interface {
	SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
	PutMessage(m types.ChainMsg) (cid.Cid, error)
@ -34,7 +42,13 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
}

func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
	mpp.sm.ChainStore().SubscribeHeadChanges(cb)
	mpp.sm.ChainStore().SubscribeHeadChanges(
		store.WrapHeadChangeCoalescer(
			cb,
			HeadChangeCoalesceMinDelay,
			HeadChangeCoalesceMaxDelay,
			HeadChangeCoalesceMergeInterval,
		))
	return mpp.sm.ChainStore().GetHeaviestTipSet()
}
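
For context on this change: rather than invoking the mpool callback on every head change, the callback is wrapped so that bursts of head changes are merged into fewer revert/apply notifications. A rough sketch of the intended timing, assuming the coalescer buffers and merges as its parameter names suggest:

// head change at t=0    -> buffered; delivery scheduled after ~2s (min delay)
// head change at t=1.5s -> merged into the pending notification and delivery pushed back,
//                          but never beyond ~6s after the first change (max delay);
// changes closer together than ~1s (merge interval) are folded into one notification.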

@ -21,6 +21,8 @@ import (

var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)

var MaxBlockMessages = 16000

// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
// away in the next fork.
func allowNegativeChains(epoch abi.ChainEpoch) bool {
@ -43,7 +45,7 @@ type msgChain struct {
	prev *msgChain
}

func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) {
	mp.curTsLk.Lock()
	defer mp.curTsLk.Unlock()

@ -54,10 +56,20 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.Si
	// than any other block, then we don't bother with optimal selection because the
	// first block will always have higher effective performance
	if tq > 0.84 {
		return mp.selectMessagesGreedy(mp.curTs, ts)
		msgs, err = mp.selectMessagesGreedy(mp.curTs, ts)
	} else {
		msgs, err = mp.selectMessagesOptimal(mp.curTs, ts, tq)
	}

	return mp.selectMessagesOptimal(mp.curTs, ts, tq)
	if err != nil {
		return nil, err
	}

	if len(msgs) > MaxBlockMessages {
		msgs = msgs[:MaxBlockMessages]
	}

	return msgs, nil
}

func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
@ -6,6 +6,10 @@ import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -23,7 +27,6 @@ import (
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/migration/nv3"
|
||||
m2 "github.com/filecoin-project/specs-actors/v2/actors/migration"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
@ -90,6 +93,14 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradeKumquatHeight,
|
||||
Network: network.Version6,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeCalicoHeight,
|
||||
Network: network.Version7,
|
||||
Migration: UpgradeCalico,
|
||||
}, {
|
||||
Height: build.UpgradePersianHeight,
|
||||
Network: network.Version8,
|
||||
Migration: nil,
|
||||
}}
|
||||
|
||||
if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
@ -601,7 +612,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
}

newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig())
newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
}
@ -652,6 +663,48 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root
return tree.Flush(ctx)
}

func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.cs.Store(ctx)
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}

if stateRoot.Version != types.StateTreeVersion1 {
return cid.Undef, xerrors.Errorf(
"expected state root version 1 for calico upgrade, got %d",
stateRoot.Version,
)
}

newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
if err != nil {
return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
}

newRoot, err := store.Put(ctx, &types.StateRoot{
Version: stateRoot.Version,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}

// perform some basic sanity checks to make sure everything still works.
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
} else if newRoot2 != newRoot {
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
}

return newRoot, nil
}
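The sanity block in UpgradeCalico follows a load/flush/compare pattern worth calling out: if re-serializing the migrated tree does not reproduce the exact root CID, the migration wrote inconsistent state. A standalone sketch of the same check, using only calls that appear in the hunk above (the helper name is hypothetical):

func checkStateRoot(ctx context.Context, store cbor.IpldStore, root cid.Cid) error {
	tree, err := state.LoadStateTree(store, root)
	if err != nil {
		return xerrors.Errorf("sanity load failed: %w", err)
	}
	root2, err := tree.Flush(ctx) // re-serialize everything reachable
	if err != nil {
		return xerrors.Errorf("sanity flush failed: %w", err)
	}
	if root2 != root {
		return xerrors.Errorf("state-root mismatch: %s != %s", root, root2)
	}
	return nil
}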

func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
ia, err := tree.GetActor(builtin0.InitActorAddr)
if err != nil {
@ -72,13 +72,17 @@ type StateManager struct {
// ErrExpensiveFork.
expensiveUpgrades map[abi.ChainEpoch]struct{}

stCache map[string][]cid.Cid
compWait map[string]chan struct{}
stlk sync.Mutex
genesisMsigLk sync.Mutex
newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
preIgnitionGenInfos *genesisInfo
postIgnitionGenInfos *genesisInfo
stCache map[string][]cid.Cid
compWait map[string]chan struct{}
stlk sync.Mutex
genesisMsigLk sync.Mutex
newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
preIgnitionVesting []msig0.State
postIgnitionVesting []msig0.State
postCalicoVesting []msig0.State

genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
}

func NewStateManager(cs *store.ChainStore) *StateManager {
@ -889,23 +893,8 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (
sm.newVM = nvm
}

type genesisInfo struct {
genesisMsigs []msig0.State
// info about the Accounts in the genesis state
genesisActors []genesisActor
genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
}

type genesisActor struct {
addr address.Address
initBal abi.TokenAmount
}

// sets up information about the actors in the genesis state
func (sm *StateManager) setupGenesisActors(ctx context.Context) error {

gi := genesisInfo{}
// sets up information about the vesting schedule
func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error {

gb, err := sm.cs.GetGenesis()
if err != nil {
@ -928,127 +917,18 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
return xerrors.Errorf("loading state tree: %w", err)
}

gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
gmf, err := getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}

gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
gp, err := getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}

totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error {
if builtin.IsMultisigActor(act.Code) {
s, err := multisig.Load(sm.cs.Store(ctx), act)
if err != nil {
return err
}

se, err := s.StartEpoch()
if err != nil {
return err
}

if se != 0 {
return xerrors.New("genesis multisig doesn't start vesting at epoch 0!")
}

ud, err := s.UnlockDuration()
if err != nil {
return err
}

ib, err := s.InitialBalance()
if err != nil {
return err
}

ot, f := totalsByEpoch[ud]
if f {
totalsByEpoch[ud] = big.Add(ot, ib)
} else {
totalsByEpoch[ud] = ib
}

} else if builtin.IsAccountActor(act.Code) {
// should exclude burnt funds actor and "remainder account actor"
// should only ever be "faucet" accounts in testnets
if kaddr == builtin.BurntFundsActorAddr {
return nil
}

kid, err := sTree.LookupID(kaddr)
if err != nil {
return xerrors.Errorf("resolving address: %w", err)
}

gi.genesisActors = append(gi.genesisActors, genesisActor{
addr: kid,
initBal: act.Balance,
})
}
return nil
})

if err != nil {
return xerrors.Errorf("error setting up genesis infos: %w", err)
}

// TODO: use network upgrade abstractions or always start at actors v0?
gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
}
gi.genesisMsigs = append(gi.genesisMsigs, ns)
}

sm.preIgnitionGenInfos = &gi

return nil
}

// sets up information about the actors in the genesis state
// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs
// We also do not consider ANY account actors (including the faucet)
func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error {

gi := genesisInfo{}

gb, err := sm.cs.GetGenesis()
if err != nil {
return xerrors.Errorf("getting genesis block: %w", err)
}

gts, err := types.NewTipSet([]*types.BlockHeader{gb})
if err != nil {
return xerrors.Errorf("getting genesis tipset: %w", err)
}

st, _, err := sm.TipSetState(ctx, gts)
if err != nil {
return xerrors.Errorf("getting genesis tipset state: %w", err)
}

cst := cbor.NewCborStore(sm.cs.Blockstore())
sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}

gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}

gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
sm.genesisMarketFunds = gmf
sm.genesisPledge = gp

totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)

@ -1074,58 +954,21 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))

gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
sm.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
}
gi.genesisMsigs = append(gi.genesisMsigs, ns)
sm.preIgnitionVesting = append(sm.preIgnitionVesting, ns)
}

sm.preIgnitionGenInfos = &gi

return nil
}

// sets up information about the actors in the genesis state, post the ignition fork
func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error {

gi := genesisInfo{}

gb, err := sm.cs.GetGenesis()
if err != nil {
return xerrors.Errorf("getting genesis block: %w", err)
}

gts, err := types.NewTipSet([]*types.BlockHeader{gb})
if err != nil {
return xerrors.Errorf("getting genesis tipset: %w", err)
}

st, _, err := sm.TipSetState(ctx, gts)
if err != nil {
return xerrors.Errorf("getting genesis tipset state: %w", err)
}

cst := cbor.NewCborStore(sm.cs.Blockstore())
sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}

// Unnecessary, should be removed
gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}

// Unnecessary, should be removed
gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
// sets up information about the vesting schedule post the ignition upgrade
func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error {

totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)

@ -1151,7 +994,7 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))

gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
// In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error
@ -1161,10 +1004,56 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
// In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself.
StartEpoch: build.UpgradeLiftoffHeight,
}
gi.genesisMsigs = append(gi.genesisMsigs, ns)
sm.postIgnitionVesting = append(sm.postIgnitionVesting, ns)
}

sm.postIgnitionGenInfos = &gi
return nil
}

// sets up information about the vesting schedule post the calico upgrade
func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {

totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)

// 0 days
zeroDays := abi.ChainEpoch(0)
totalsByEpoch[zeroDays] = big.NewInt(10_632_000)

// 6 months
sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
totalsByEpoch[sixMonths] = big.NewInt(19_015_887)
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))

// 1 year
oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
totalsByEpoch[oneYear] = big.NewInt(22_421_712)
totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000))

// 2 years
twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
totalsByEpoch[twoYears] = big.NewInt(7_223_364)

// 3 years
threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
totalsByEpoch[threeYears] = big.NewInt(87_637_883)
totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958))

// 6 years
sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053))

sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))),
UnlockDuration: k,
PendingTxns: cid.Undef,
StartEpoch: build.UpgradeLiftoffHeight,
}
sm.postCalicoVesting = append(sm.postCalicoVesting, ns)
}

return nil
}
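Note the unit handling above: the totalsByEpoch amounts are recorded in whole FIL and only converted to attoFIL when the multisig state is built, avoiding the off-by-10^18 mistake called out in the pre-ignition comment. A worked example of the six-year bucket, using only calls from the function above (the helper wrapper is illustrative):

func sixYearBucketAtto() abi.TokenAmount {
	// grants recorded in whole FIL, summed: 100M + 300M + 9,805,053
	fil := big.NewInt(100_000_000 + 300_000_000 + 9_805_053) // 409,805,053 FIL
	// InitialBalance is attoFIL, i.e. FIL * 10^18
	return big.Mul(fil, big.NewInt(int64(build.FilecoinPrecision)))
}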

@ -1175,12 +1064,19 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
vf := big.Zero()
if height <= build.UpgradeIgnitionHeight {
for _, v := range sm.preIgnitionGenInfos.genesisMsigs {
for _, v := range sm.preIgnitionVesting {
au := big.Sub(v.InitialBalance, v.AmountLocked(height))
vf = big.Add(vf, au)
}
} else if height <= build.UpgradeCalicoHeight {
for _, v := range sm.postIgnitionVesting {
// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
// The start epoch changed in the Ignition upgrade.
au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
vf = big.Add(vf, au)
}
} else {
for _, v := range sm.postIgnitionGenInfos.genesisMsigs {
for _, v := range sm.postCalicoVesting {
// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
// The start epoch changed in the Ignition upgrade.
au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
@ -1188,26 +1084,12 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch,
}
}

// there should not be any such accounts in testnet (and also none in mainnet?)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
for _, v := range sm.preIgnitionGenInfos.genesisActors {
act, err := st.GetActor(v.addr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get actor: %w", err)
}

diff := big.Sub(v.initBal, act.Balance)
if diff.GreaterThan(big.Zero()) {
vf = big.Add(vf, diff)
}
}

// After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed
if height <= build.UpgradeActorsV2Height {
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge)
vf = big.Add(vf, sm.genesisPledge)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds)
vf = big.Add(vf, sm.genesisMarketFunds)
}

return vf, nil
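The three branches of GetFilVested can be read as a single table lookup keyed on the upgrade epochs. A hypothetical in-package helper that makes the regime selection explicit (all identifiers come from the hunk above):

func (sm *StateManager) vestingSchedule(height abi.ChainEpoch) []msig0.State {
	switch {
	case height <= build.UpgradeIgnitionHeight:
		return sm.preIgnitionVesting // AmountLocked called with the raw height
	case height <= build.UpgradeCalicoHeight:
		return sm.postIgnitionVesting // height offset by each msig's StartEpoch
	default:
		return sm.postCalicoVesting
	}
}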

@ -1301,16 +1183,22 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C
func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
if sm.preIgnitionGenInfos == nil {
err := sm.setupPreIgnitionGenesisActorsTestnet(ctx)
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err)
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionGenInfos == nil {
err := sm.setupPostIgnitionGenesisActors(ctx)
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err)
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}

@ -158,7 +158,7 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet,
return mas.LoadSectors(snos)
}

func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
@ -169,21 +169,27 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}

// TODO (!!): Actor Update: Make this active sectors
var provingSectors bitfield.BitField
if nv < network.Version7 {
allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}

allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}
faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}

faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}

provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faults, change to just ActiveSectors in an upgrade
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors)
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
}
} else {
provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
if err != nil {
return nil, xerrors.Errorf("get active sectors: %w", err)
}
}

numProvSect, err := provingSectors.Count()
@ -201,12 +207,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("getting miner info: %w", err)
}

spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
if err != nil {
return nil, xerrors.Errorf("getting seal proof type: %w", err)
}

wpt, err := spt.RegisteredWinningPoStProof()
wpt, err := info.SealProofType.RegisteredWinningPoStProof()
if err != nil {
return nil, xerrors.Errorf("getting window proof type: %w", err)
}
@ -246,7 +247,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
out := make([]builtin.SectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
SealProof: spt,
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
}
@ -497,7 +498,9 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}

sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
nv := sm.GetNtwkVersion(ctx, ts.Height())

sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand)
if err != nil {
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}

214
chain/store/coalescer.go
Normal file
@ -0,0 +1,214 @@
package store

import (
"context"
"time"

"github.com/filecoin-project/lotus/chain/types"
)

// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer.
// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will
// wait for that long to coalesce more head changes.
// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change
// more than that.
// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was
// within the merge interval when the coalesce timer fires, then the coalesce time is extended
// by min delay and up to max delay total.
func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee {
c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval)
return c.HeadChange
}

// HeadChangeCoalescer is a stateful reorg notifee which coalesces incoming head changes
// with pending head changes to reduce state computations from head change notifications.
type HeadChangeCoalescer struct {
notify ReorgNotifee

ctx context.Context
cancel func()

eventq chan headChange

revert []*types.TipSet
apply []*types.TipSet
}

type headChange struct {
revert, apply []*types.TipSet
}

// NewHeadChangeCoalescer creates a HeadChangeCoalescer.
func NewHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) *HeadChangeCoalescer {
ctx, cancel := context.WithCancel(context.Background())
c := &HeadChangeCoalescer{
notify: fn,
ctx: ctx,
cancel: cancel,
eventq: make(chan headChange),
}

go c.background(minDelay, maxDelay, mergeInterval)

return c
}

// HeadChange is the ReorgNotifee callback for the stateful coalescer; it receives an incoming
// head change and schedules dispatch of a coalesced head change in the background.
func (c *HeadChangeCoalescer) HeadChange(revert, apply []*types.TipSet) error {
select {
case c.eventq <- headChange{revert: revert, apply: apply}:
return nil
case <-c.ctx.Done():
return c.ctx.Err()
}
}

// Close closes the coalescer and cancels the background dispatch goroutine.
// Any further notification will result in an error.
func (c *HeadChangeCoalescer) Close() error {
select {
case <-c.ctx.Done():
default:
c.cancel()
}

return nil
}

// Implementation details

func (c *HeadChangeCoalescer) background(minDelay, maxDelay, mergeInterval time.Duration) {
var timerC <-chan time.Time
var first, last time.Time

for {
select {
case evt := <-c.eventq:
c.coalesce(evt.revert, evt.apply)

now := time.Now()
last = now
if first.IsZero() {
first = now
}

if timerC == nil {
timerC = time.After(minDelay)
}

case now := <-timerC:
sinceFirst := now.Sub(first)
sinceLast := now.Sub(last)

if sinceLast < mergeInterval && sinceFirst < maxDelay {
// coalesce some more
maxWait := maxDelay - sinceFirst
wait := minDelay
if maxWait < wait {
wait = maxWait
}

timerC = time.After(wait)
} else {
// dispatch
c.dispatch()

first = time.Time{}
last = time.Time{}
timerC = nil
}

case <-c.ctx.Done():
if c.revert != nil || c.apply != nil {
c.dispatch()
}
return
}
}
}

func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) {
// newly reverted tipsets cancel out with pending applys.
// similarly, newly applied tipsets cancel out with pending reverts.

// pending tipsets
pendRevert := make(map[types.TipSetKey]struct{}, len(c.revert))
for _, ts := range c.revert {
pendRevert[ts.Key()] = struct{}{}
}

pendApply := make(map[types.TipSetKey]struct{}, len(c.apply))
for _, ts := range c.apply {
pendApply[ts.Key()] = struct{}{}
}

// incoming tipsets
reverting := make(map[types.TipSetKey]struct{}, len(revert))
for _, ts := range revert {
reverting[ts.Key()] = struct{}{}
}

applying := make(map[types.TipSetKey]struct{}, len(apply))
for _, ts := range apply {
applying[ts.Key()] = struct{}{}
}

// coalesced revert set
// - pending reverts are cancelled by incoming applys
// - incoming reverts are cancelled by pending applys
newRevert := make([]*types.TipSet, 0, len(c.revert)+len(revert))
for _, ts := range c.revert {
_, cancel := applying[ts.Key()]
if cancel {
continue
}

newRevert = append(newRevert, ts)
}

for _, ts := range revert {
_, cancel := pendApply[ts.Key()]
if cancel {
continue
}

newRevert = append(newRevert, ts)
}

// coalesced apply set
// - pending applys are cancelled by incoming reverts
// - incoming applys are cancelled by pending reverts
newApply := make([]*types.TipSet, 0, len(c.apply)+len(apply))
for _, ts := range c.apply {
_, cancel := reverting[ts.Key()]
if cancel {
continue
}

newApply = append(newApply, ts)
}

for _, ts := range apply {
_, cancel := pendRevert[ts.Key()]
if cancel {
continue
}

newApply = append(newApply, ts)
}

// commit the coalesced sets
c.revert = newRevert
c.apply = newApply
}

func (c *HeadChangeCoalescer) dispatch() {
err := c.notify(c.revert, c.apply)
if err != nil {
log.Errorf("error dispatching coalesced head change notification: %s", err)
}

c.revert = nil
c.apply = nil
}
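Typical wiring for the coalescer, sketched below as an in-package example; the delay values are arbitrary and the consumer callback is a stand-in:

func exampleCoalescedNotifee() ReorgNotifee {
	return WrapHeadChangeCoalescer(
		func(revert, apply []*types.TipSet) error {
			// expensive reaction to the already-merged head change goes here
			return nil
		},
		100*time.Millisecond, // minDelay: initial coalesce window
		time.Second,          // maxDelay: upper bound on delivery delay
		10*time.Millisecond,  // mergeInterval: recent events extend the window
	)
}

The returned function satisfies ReorgNotifee and can be handed to the reorg worker like any other notifee; the test file that follows exercises exactly this cancellation behavior.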
72
chain/store/coalescer_test.go
Normal file
@ -0,0 +1,72 @@
package store

import (
"testing"
"time"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
)

func TestHeadChangeCoalescer(t *testing.T) {
notif := make(chan headChange, 1)
c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error {
notif <- headChange{apply: apply, revert: revert}
return nil
},
100*time.Millisecond,
200*time.Millisecond,
10*time.Millisecond,
)
defer c.Close() //nolint

b0 := mock.MkBlock(nil, 0, 0)
root := mock.TipSet(b0)
bA := mock.MkBlock(root, 1, 1)
tA := mock.TipSet(bA)
bB := mock.MkBlock(root, 1, 2)
tB := mock.TipSet(bB)
tAB := mock.TipSet(bA, bB)
bC := mock.MkBlock(root, 1, 3)
tABC := mock.TipSet(bA, bB, bC)
bD := mock.MkBlock(root, 1, 4)
tABCD := mock.TipSet(bA, bB, bC, bD)
bE := mock.MkBlock(root, 1, 5)
tABCDE := mock.TipSet(bA, bB, bC, bD, bE)

c.HeadChange(nil, []*types.TipSet{tA}) //nolint
c.HeadChange(nil, []*types.TipSet{tB}) //nolint
c.HeadChange([]*types.TipSet{tA, tB}, []*types.TipSet{tAB}) //nolint
c.HeadChange([]*types.TipSet{tAB}, []*types.TipSet{tABC}) //nolint

change := <-notif

if len(change.revert) != 0 {
t.Fatalf("expected empty revert set but got %d elements", len(change.revert))
}
if len(change.apply) != 1 {
t.Fatalf("expected single element apply set but got %d elements", len(change.apply))
}
if change.apply[0] != tABC {
t.Fatalf("expected to apply tABC")
}

c.HeadChange([]*types.TipSet{tABC}, []*types.TipSet{tABCD}) //nolint
c.HeadChange([]*types.TipSet{tABCD}, []*types.TipSet{tABCDE}) //nolint

change = <-notif

if len(change.revert) != 1 {
t.Fatalf("expected single element revert set but got %d elements", len(change.revert))
}
if change.revert[0] != tABC {
t.Fatalf("expected to revert tABC")
}
if len(change.apply) != 1 {
t.Fatalf("expected single element apply set but got %d elements", len(change.apply))
}
if change.apply[0] != tABCDE {
t.Fatalf("expected to apply tABCDE")
}

}
@ -31,7 +31,8 @@ func TestIndexSeeks(t *testing.T) {
ctx := context.TODO()

nbs := blockstore.NewTemporarySync()
cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil)
cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil)
defer cs.Close() //nolint:errcheck

_, err = cs.Import(bytes.NewReader(gencar))
if err != nil {
@ -5,6 +5,7 @@ import (
"context"
"encoding/binary"
"encoding/json"
"errors"
"io"
"os"
"strconv"
@ -44,10 +45,10 @@ import (
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
car "github.com/ipld/go-car"
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
cbg "github.com/whyrusleeping/cbor-gen"
pubsub "github.com/whyrusleeping/pubsub"
"github.com/whyrusleeping/pubsub"
"golang.org/x/xerrors"
)

@ -59,6 +60,8 @@ var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048

var ErrNotifeeDone = errors.New("notifee is done and should be removed")

func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
@ -104,8 +107,11 @@ type HeadChangeEvt struct {
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
bs bstore.Blockstore
ds dstore.Batching
bs bstore.Blockstore
localbs bstore.Blockstore
ds dstore.Batching

localviewer bstore.Viewer

heaviestLk sync.Mutex
heaviest *types.TipSet
@ -128,25 +134,37 @@ type ChainStore struct {

evtTypes [1]journal.EventType
journal journal.Journal

cancelFn context.CancelFunc
wg sync.WaitGroup
}

func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
// localbs is guaranteed to fail Get* if requested block isn't stored locally
func NewChainStore(bs bstore.Blockstore, localbs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
mmCache, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsCache, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}

ctx, cancel := context.WithCancel(context.Background())
cs := &ChainStore{
bs: bs,
localbs: localbs,
ds: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
mmCache: mmCache,
tsCache: tsCache,
vmcalls: vmcalls,
cancelFn: cancel,
journal: j,
}

if v, ok := localbs.(bstore.Viewer); ok {
cs.localviewer = v
}

cs.evtTypes = [1]journal.EventType{
evtTypeHeadChange: j.RegisterEventType("sync", "head_change"),
}
@ -179,19 +197,24 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB
}

hcmetric := func(rev, app []*types.TipSet) error {
ctx := context.Background()
for _, r := range app {
stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height())))
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}

cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric})
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})

return cs
}

func (cs *ChainStore) Close() error {
cs.cancelFn()
cs.wg.Wait()
return nil
}

func (cs *ChainStore) Load() error {
head, err := cs.ds.Get(chainHeadKey)
if err == dstore.ErrNotFound {
@ -259,7 +282,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
log.Warn("chain head sub exit loop")
return
}
if len(out) > 0 {
if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
select {
@ -358,10 +381,32 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
return cs.takeHeaviestTipSet(ctx, ts)
} else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
}
return nil
}

// ForceHeadSilent forces a chain head tipset without triggering a reorg
// operation.
//
// CAUTION: Use it only for testing, such as to teleport the chain to a
// particular tipset to carry out a benchmark, verification, etc. on a chain
// segment.
func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error {
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)

cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
cs.heaviest = ts

err := cs.writeHead(ts)
if err != nil {
err = xerrors.Errorf("failed to write chain head: %s", err)
}
return err
}

type reorg struct {
old *types.TipSet
new *types.TipSet
@ -372,7 +417,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)

cs.wg.Add(1)
go func() {
defer cs.wg.Done()
defer log.Warn("reorgWorker quit")

for {
@ -404,11 +451,36 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
apply[i], apply[opp] = apply[opp], apply[i]
}

for _, hcf := range notifees {
if err := hcf(revert, apply); err != nil {
var toremove map[int]struct{}
for i, hcf := range notifees {
err := hcf(revert, apply)

switch err {
case nil:

case ErrNotifeeDone:
if toremove == nil {
toremove = make(map[int]struct{})
}
toremove[i] = struct{}{}

default:
log.Error("head change func errored (BAD): ", err)
}
}

if len(toremove) > 0 {
newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
for i, hcf := range notifees {
_, remove := toremove[i]
if remove {
continue
}
newNotifees = append(newNotifees, hcf)
}
notifees = newNotifees
}

case <-ctx.Done():
return
}
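ErrNotifeeDone gives notifees a clean way to unsubscribe themselves: the hunk above collects such notifees and rebuilds the slice without them after each delivery. An illustrative one-shot notifee using only names from this file:

var once ReorgNotifee = func(revert, apply []*types.TipSet) error {
	if len(apply) == 0 {
		return nil // nothing applied yet; stay subscribed
	}
	// ... react to the first applied head exactly once ...
	return ErrNotifeeDone // ask the reorg worker to drop this notifee
}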

@ -522,12 +594,20 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
sb, err := cs.bs.Get(c)
if err != nil {
return nil, err
if cs.localviewer == nil {
sb, err := cs.localbs.Get(c)
if err != nil {
return nil, err
}
return types.DecodeBlock(sb.RawData())
}

return types.DecodeBlock(sb.RawData())
var blk *types.BlockHeader
err := cs.localviewer.View(c, func(b []byte) (err error) {
blk, err = types.DecodeBlock(b)
return err
})
return blk, err
}
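The same Viewer-versus-Get fallback recurs in GetMessage and GetSignedMessage below: View decodes directly against the blockstore's own buffer (avoiding a copy) when the local blockstore implements bstore.Viewer, and Get is the copying fallback otherwise. A hypothetical generic helper capturing the shape of the pattern:

func readRaw(localbs bstore.Blockstore, viewer bstore.Viewer, c cid.Cid, decode func([]byte) error) error {
	if viewer != nil {
		// zero-copy path: decode runs while the store holds the buffer
		return viewer.View(c, decode)
	}
	blk, err := localbs.Get(c)
	if err != nil {
		return err
	}
	return decode(blk.RawData())
}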

func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
@ -772,12 +852,7 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) {
return nil, err
}

genb, err := cs.bs.Get(c)
if err != nil {
return nil, err
}

return types.DecodeBlock(genb.RawData())
return cs.GetBlock(c)
}

func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
@ -793,23 +868,39 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
}

func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) {
sb, err := cs.bs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
if cs.localviewer == nil {
sb, err := cs.localbs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
}
return types.DecodeMessage(sb.RawData())
}

return types.DecodeMessage(sb.RawData())
var msg *types.Message
err := cs.localviewer.View(c, func(b []byte) (err error) {
msg, err = types.DecodeMessage(b)
return err
})
return msg, err
}

func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) {
sb, err := cs.bs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
if cs.localviewer == nil {
sb, err := cs.localbs.Get(c)
if err != nil {
log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
}
return types.DecodeSignedMessage(sb.RawData())
}

return types.DecodeSignedMessage(sb.RawData())
var msg *types.SignedMessage
err := cs.localviewer.View(c, func(b []byte) (err error) {
msg, err = types.DecodeSignedMessage(b)
return err
})
return msg, err
}

func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
@ -939,7 +1030,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error)
return mmcids.bls, mmcids.secpk, nil
}

cst := cbor.NewCborStore(cs.bs)
cst := cbor.NewCborStore(cs.localbs)
var msgmeta types.MsgMeta
if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil {
return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err)
@ -3,6 +3,7 @@ package store_test
import (
"bytes"
"context"
"io"
"testing"

datastore "github.com/ipfs/go-datastore"
@ -51,19 +52,26 @@ func BenchmarkGetRandomness(b *testing.B) {
b.Fatal(err)
}

bds, err := lr.Datastore("/chain")
bs, err := lr.Blockstore(repo.BlockstoreChain)
if err != nil {
b.Fatal(err)
}

defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
b.Logf("WARN: failed to close blockstore: %s", err)
}
}
}()

mds, err := lr.Datastore("/metadata")
if err != nil {
b.Fatal(err)
}

bs := blockstore.NewBlockstore(bds)

cs := store.NewChainStore(bs, mds, nil, nil)
cs := store.NewChainStore(bs, bs, mds, nil, nil)
defer cs.Close() //nolint:errcheck

b.ResetTimer()

@ -97,7 +105,8 @@ func TestChainExportImport(t *testing.T) {
}

nbs := blockstore.NewTemporary()
cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil)
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
defer cs.Close() //nolint:errcheck

root, err := cs.Import(buf)
if err != nil {
@ -131,7 +140,9 @@ func TestChainExportImportFull(t *testing.T) {
}

nbs := blockstore.NewTemporary()
cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil)
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
defer cs.Close() //nolint:errcheck

root, err := cs.Import(buf)
if err != nil {
t.Fatal(err)
@ -95,7 +95,10 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
}

took := build.Clock.Since(start)
log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
if took > 3*time.Second {
log.Warnw("Slow msg fetch", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
}
if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 {
log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner)
}
@ -337,6 +340,13 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
stats.Record(ctx, metrics.BlockPublished.M(1))

if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "oversize_block"))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
return pubsub.ValidationIgnore
}

blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
@ -32,6 +32,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
blst "github.com/supranational/blst/bindings/go"

@ -278,7 +279,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
return false
}

@ -563,15 +564,16 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
)
}

if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
hts := syncer.store.GetHeaviestTipSet()

if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) {
return nil
}

if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) {
return nil
}

if err := syncer.collectChain(ctx, maybeHead); err != nil {
if err := syncer.collectChain(ctx, maybeHead, hts); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
@ -730,6 +732,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}

winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())

lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
@ -923,7 +927,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
})

wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil {
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
@ -975,7 +979,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return nil
}

func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
@ -1007,7 +1011,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}

sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
@ -1071,7 +1075,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock

// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total()); err != nil {
if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
return err
}

@ -1684,14 +1688,14 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
ss := extractSyncState(ctx)

ss.Init(syncer.store.GetHeaviestTipSet(), ts)
ss.Init(hts, ts)

headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet())
headers, err := syncer.collectHeaders(ctx, ts, hts)
if err != nil {
ss.Error(err)
return err
@ -4,30 +4,43 @@ import (
"context"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"

peer "github.com/libp2p/go-libp2p-core/peer"
)

const BootstrapPeerThreshold = 2
var (
BootstrapPeerThreshold = build.BootstrapPeerThreshold

var coalesceForksParents = false
RecentSyncBufferSize = 10
MaxSyncWorkers = 5
SyncWorkerHistory = 3

InitialSyncTimeThreshold = 15 * time.Minute

coalesceTipsets = false
)

func init() {
if os.Getenv("LOTUS_SYNC_REL_PARENT") == "yes" {
coalesceForksParents = true
coalesceTipsets = os.Getenv("LOTUS_SYNC_FORMTS_PEND") == "yes"

if bootstrapPeerThreshold := os.Getenv("LOTUS_SYNC_BOOTSTRAP_PEERS"); bootstrapPeerThreshold != "" {
threshold, err := strconv.Atoi(bootstrapPeerThreshold)
if err != nil {
log.Errorf("failed to parse 'LOTUS_SYNC_BOOTSTRAP_PEERS' env var: %s", err)
} else {
BootstrapPeerThreshold = threshold
}
}
}
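Because these knobs are now package-level variables rather than constants, tests and embedders can adjust them directly instead of going through the LOTUS_SYNC_BOOTSTRAP_PEERS environment variable. A hypothetical test-setup sketch; the package import path and the assumption that this file lives in the lotus chain package are mine, not the commit's:

func TestMain(m *testing.M) {
	chain.BootstrapPeerThreshold = 1 // sync as soon as one peer reports a head
	chain.MaxSyncWorkers = 2         // keep worker fan-out small in tests
	os.Exit(m.Run())
}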

const (
BSStateInit = 0
BSStateSelected = 1
BSStateScheduled = 2
BSStateComplete = 3
)

type SyncFunc func(context.Context, *types.TipSet) error

// SyncManager manages the chain synchronization process, both at bootstrap time
@ -52,108 +65,467 @@ type SyncManager interface {
}

type syncManager struct {
lk sync.Mutex
peerHeads map[peer.ID]*types.TipSet
ctx context.Context
cancel func()

bssLk sync.Mutex
bootstrapState int
workq chan peerHead
statusq chan workerStatus

bspThresh int
nextWorker uint64
pend syncBucketSet
deferred syncBucketSet
heads map[peer.ID]*types.TipSet
recent *syncBuffer

incomingTipSets chan *types.TipSet
syncTargets chan *types.TipSet
syncResults chan *syncResult
initialSyncDone bool

syncStates []*SyncerState
mx sync.Mutex
state map[uint64]*workerState

history []*workerState
historyI int

// Normally this handler is set to `(*Syncer).Sync()`.
doSync func(context.Context, *types.TipSet) error

stop chan struct{}

// Sync Scheduler fields
activeSyncs map[types.TipSetKey]*types.TipSet
syncQueue syncBucketSet
activeSyncTips syncBucketSet
nextSyncTarget *syncTargetBucket
workerChan chan *types.TipSet
}

var _ SyncManager = (*syncManager)(nil)

type syncResult struct {
ts *types.TipSet
success bool
type peerHead struct {
p peer.ID
ts *types.TipSet
}

const syncWorkerCount = 3
type workerState struct {
id uint64
ts *types.TipSet
ss *SyncerState
dt time.Duration
}

type workerStatus struct {
id uint64
err error
}

// sync manager interface
func NewSyncManager(sync SyncFunc) SyncManager {
sm := &syncManager{
bspThresh: 1,
peerHeads: make(map[peer.ID]*types.TipSet),
syncTargets: make(chan *types.TipSet),
syncResults: make(chan *syncResult),
syncStates: make([]*SyncerState, syncWorkerCount),
incomingTipSets: make(chan *types.TipSet),
activeSyncs: make(map[types.TipSetKey]*types.TipSet),
doSync: sync,
stop: make(chan struct{}),
ctx, cancel := context.WithCancel(context.Background())
return &syncManager{
ctx: ctx,
cancel: cancel,

workq: make(chan peerHead),
statusq: make(chan workerStatus),

heads: make(map[peer.ID]*types.TipSet),
state: make(map[uint64]*workerState),
recent: newSyncBuffer(RecentSyncBufferSize),
history: make([]*workerState, SyncWorkerHistory),

doSync: sync,

}
for i := range sm.syncStates {
sm.syncStates[i] = new(SyncerState)
}
return sm
}

func (sm *syncManager) Start() {
go sm.syncScheduler()
for i := 0; i < syncWorkerCount; i++ {
go sm.syncWorker(i)
}
go sm.scheduler()
}

func (sm *syncManager) Stop() {
close(sm.stop)
select {
case <-sm.ctx.Done():
default:
sm.cancel()
}
}

func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
sm.lk.Lock()
defer sm.lk.Unlock()
sm.peerHeads[p] = ts

if sm.getBootstrapState() == BSStateInit {
spc := sm.syncedPeerCount()
if spc >= sm.bspThresh {
// It's go time!
target, err := sm.selectSyncTarget()
if err != nil {
log.Error("failed to select sync target: ", err)
return
}
sm.setBootstrapState(BSStateSelected)

sm.incomingTipSets <- target
}
log.Infof("sync bootstrap has %d peers", spc)
return
select {
case sm.workq <- peerHead{p: p, ts: ts}:
case <-sm.ctx.Done():
case <-ctx.Done():
}

sm.incomingTipSets <- ts
}

func (sm *syncManager) State() []SyncerStateSnapshot {
ret := make([]SyncerStateSnapshot, 0, len(sm.syncStates))
for _, s := range sm.syncStates {
ret = append(ret, s.Snapshot())
sm.mx.Lock()
workerStates := make([]*workerState, 0, len(sm.state)+len(sm.history))
for _, ws := range sm.state {
workerStates = append(workerStates, ws)
}
return ret
for _, ws := range sm.history {
if ws != nil {
workerStates = append(workerStates, ws)
}
}
sm.mx.Unlock()

sort.Slice(workerStates, func(i, j int) bool {
return workerStates[i].id < workerStates[j].id
})

result := make([]SyncerStateSnapshot, 0, len(workerStates))
for _, ws := range workerStates {
result = append(result, ws.ss.Snapshot())
}

return result
}

// sync manager internals
func (sm *syncManager) scheduler() {
ticker := time.NewTicker(time.Minute)
tickerC := ticker.C
for {
select {
case head := <-sm.workq:
sm.handlePeerHead(head)
case status := <-sm.statusq:
sm.handleWorkerStatus(status)
case <-tickerC:
if sm.initialSyncDone {
ticker.Stop()
tickerC = nil
sm.handleInitialSyncDone()
}
case <-sm.ctx.Done():
return
}
}
}

func (sm *syncManager) handlePeerHead(head peerHead) {
log.Debugf("new peer head: %s %s", head.p, head.ts)

// have we started syncing yet?
if sm.nextWorker == 0 {
// track the peer head until we start syncing
sm.heads[head.p] = head.ts

// not yet; do we have enough peers?
if len(sm.heads) < BootstrapPeerThreshold {
// not enough peers; track it and wait
return
}

// we are ready to start syncing; select the sync target and spawn a worker
target, err := sm.selectInitialSyncTarget()
if err != nil {
log.Errorf("failed to select initial sync target: %s", err)
return
}

log.Infof("selected initial sync target: %s", target)
sm.spawnWorker(target)
return
}

// we have started syncing, add peer head to the queue if applicable and maybe spawn a worker
// if there is work to do (possibly in a fork)
target, work, err := sm.addSyncTarget(head.ts)
if err != nil {
log.Warnf("failed to add sync target: %s", err)
return
}

if work {
log.Infof("selected sync target: %s", target)
sm.spawnWorker(target)
}
}

func (sm *syncManager) handleWorkerStatus(status workerStatus) {
log.Debugf("worker %d done; status error: %s", status.id, status.err)

sm.mx.Lock()
ws := sm.state[status.id]
delete(sm.state, status.id)

// we track the last few workers for debug purposes
sm.history[sm.historyI] = ws
sm.historyI++
sm.historyI %= len(sm.history)
sm.mx.Unlock()

if status.err != nil {
// we failed to sync this target -- log it and try to work on an extended chain
// if there is nothing related to be worked on, we stop working on this chain.
log.Errorf("error during sync in %s: %s", ws.ts, status.err)
} else {
// add to the recently synced buffer
sm.recent.Push(ws.ts)
// if we are still in initial sync and this was fast enough, mark the end of the initial sync
if !sm.initialSyncDone && ws.dt < InitialSyncTimeThreshold {
sm.initialSyncDone = true
}
}

// we are done with this target, select the next sync target and spawn a worker if there is work
// to do, because of an extension of this chain.
target, work, err := sm.selectSyncTarget(ws.ts)
if err != nil {
log.Warnf("failed to select sync target: %s", err)
return
}

if work {
log.Infof("selected sync target: %s", target)
sm.spawnWorker(target)
}
}

func (sm *syncManager) handleInitialSyncDone() {
// we have just finished the initial sync; spawn some additional workers in deferred syncs
// as needed (and up to MaxSyncWorkers) to ramp up chain sync
for len(sm.state) < MaxSyncWorkers {
target, work, err := sm.selectDeferredSyncTarget()
if err != nil {
log.Errorf("error selecting deferred sync target: %s", err)
return
}

if !work {
return
}

log.Infof("selected deferred sync target: %s", target)
sm.spawnWorker(target)
}
}

func (sm *syncManager) spawnWorker(target *types.TipSet) {
id := sm.nextWorker
sm.nextWorker++
ws := &workerState{
id: id,
ts: target,
ss: new(SyncerState),
}
ws.ss.data.WorkerID = id

sm.mx.Lock()
sm.state[id] = ws
sm.mx.Unlock()

go sm.worker(ws)
}

func (sm *syncManager) worker(ws *workerState) {
log.Infof("worker %d syncing in %s", ws.id, ws.ts)

start := build.Clock.Now()

ctx := context.WithValue(sm.ctx, syncStateKey{}, ws.ss)
err := sm.doSync(ctx, ws.ts)

ws.dt = build.Clock.Since(start)
log.Infof("worker %d done; took %s", ws.id, ws.dt)
select {
case sm.statusq <- workerStatus{id: ws.id, err: err}:
case <-sm.ctx.Done():
}
}

// selects the initial sync target by examining known peer heads; only called once for the initial
|
||||
// sync.
|
||||
func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) {
|
||||
var buckets syncBucketSet
|
||||
|
||||
var peerHeads []*types.TipSet
|
||||
for _, ts := range sm.heads {
|
||||
peerHeads = append(peerHeads, ts)
|
||||
}
|
||||
// clear the map, we don't use it any longer
|
||||
sm.heads = nil
|
||||
|
||||
sort.Slice(peerHeads, func(i, j int) bool {
|
||||
return peerHeads[i].Height() < peerHeads[j].Height()
|
||||
})
|
||||
|
||||
for _, ts := range peerHeads {
|
||||
buckets.Insert(ts)
|
||||
}
|
||||
|
||||
if len(buckets.buckets) > 1 {
|
||||
log.Warn("caution, multiple distinct chains seen during head selections")
|
||||
// TODO: we *could* refuse to sync here without user intervention.
|
||||
// For now, just select the best cluster
|
||||
}
|
||||
|
||||
return buckets.Heaviest(), nil
|
||||
}

// adds a tipset to the potential sync targets; returns true if there is a tipset to work on.
// this could be either a restart, e.g. because there is no currently scheduled sync work or a worker
// failed, or a potential fork.
func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) {
	// Note: we don't need the state lock here to access the active worker states, as the only
	// competing threads that may access it do so through State() which is read only.

	// if we have recently synced this or any heavier tipset we just ignore it; this can happen
	// with an empty worker set after we just finished syncing to a target
	if sm.recent.Synced(ts) {
		return nil, false, nil
	}

	// if the worker set is empty, we have finished syncing and were waiting for the next tipset
	// in this case, we just return the tipset as work to be done
	if len(sm.state) == 0 {
		return ts, true, nil
	}

	// check if it is related to any active sync; if so insert into the pending sync queue
	for _, ws := range sm.state {
		if ts.Equals(ws.ts) {
			// ignore it, we are already syncing it
			return nil, false, nil
		}

		if ts.Parents() == ws.ts.Key() {
			// schedule for syncing next; it's an extension of an active sync
			sm.pend.Insert(ts)
			return nil, false, nil
		}
	}

	// check to see if it is related to any pending sync; if so insert it into the pending sync queue
	if sm.pend.RelatedToAny(ts) {
		sm.pend.Insert(ts)
		return nil, false, nil
	}

	// it's not related to any active or pending sync; this could be a fork in which case we
	// start a new worker to sync it, if it is *heavier* than any active or pending set;
	// if it is not, we ignore it.
	for _, ws := range sm.state {
		if isHeavier(ws.ts, ts) {
			return nil, false, nil
		}
	}

	pendHeaviest := sm.pend.Heaviest()
	if pendHeaviest != nil && isHeavier(pendHeaviest, ts) {
		return nil, false, nil
	}

	// if we have not finished the initial sync or have too many workers, add it to the deferred queue;
	// it will be processed once a worker is freed from syncing a chain (or the initial sync finishes)
	if !sm.initialSyncDone || len(sm.state) >= MaxSyncWorkers {
		log.Debugf("deferring sync on %s", ts)
		sm.deferred.Insert(ts)
		return nil, false, nil
	}

	// start a new worker, seems heavy enough and unrelated to active or pending syncs
	return ts, true, nil
}

// selects the next sync target after a worker sync has finished; returns true and a target
// TipSet if this chain should continue to sync because there is a heavier related tipset.
func (sm *syncManager) selectSyncTarget(done *types.TipSet) (*types.TipSet, bool, error) {
	// we pop the related bucket and if there is any related tipset, we work on the heaviest one next
	// if we are not already working on a heavier tipset
	related := sm.pend.PopRelated(done)
	if related == nil {
		return sm.selectDeferredSyncTarget()
	}

	heaviest := related.heaviestTipSet()
	if isHeavier(done, heaviest) {
		return sm.selectDeferredSyncTarget()
	}

	for _, ws := range sm.state {
		if isHeavier(ws.ts, heaviest) {
			return sm.selectDeferredSyncTarget()
		}
	}

	if sm.recent.Synced(heaviest) {
		return sm.selectDeferredSyncTarget()
	}

	return heaviest, true, nil
}

// selects a deferred sync target if there is any; these are sync targets that were not related to
// active syncs and were deferred because there were too many workers running
func (sm *syncManager) selectDeferredSyncTarget() (*types.TipSet, bool, error) {
deferredLoop:
	for !sm.deferred.Empty() {
		bucket := sm.deferred.Pop()
		heaviest := bucket.heaviestTipSet()

		if sm.recent.Synced(heaviest) {
			// we have synced it or something heavier recently, skip it
			continue deferredLoop
		}

		if sm.pend.RelatedToAny(heaviest) {
			// this has converged to a pending sync, insert it to the pending queue
			sm.pend.Insert(heaviest)
			continue deferredLoop
		}

		for _, ws := range sm.state {
			if ws.ts.Equals(heaviest) || isHeavier(ws.ts, heaviest) {
				// we have converged and are already syncing it or we are syncing on something heavier
				// ignore it and pop the next deferred bucket
				continue deferredLoop
			}

			if heaviest.Parents() == ws.ts.Key() {
				// we have converged and we are syncing its parent; insert it to the pending queue
				sm.pend.Insert(heaviest)
				continue deferredLoop
			}

			// it's not related to any active or pending sync and this worker is free, so sync it!
			return heaviest, true, nil
		}
	}

	return nil, false, nil
}

func isHeavier(a, b *types.TipSet) bool {
	return a.ParentWeight().GreaterThan(b.ParentWeight())
}

// sync buffer -- this is a circular buffer of recently synced tipsets
type syncBuffer struct {
	buf []*types.TipSet
	next int
}

func newSyncBuffer(size int) *syncBuffer {
	return &syncBuffer{buf: make([]*types.TipSet, size)}
}

func (sb *syncBuffer) Push(ts *types.TipSet) {
	sb.buf[sb.next] = ts
	sb.next++
	sb.next %= len(sb.buf)
}

func (sb *syncBuffer) Synced(ts *types.TipSet) bool {
	for _, rts := range sb.buf {
		if rts != nil && (rts.Equals(ts) || isHeavier(rts, ts)) {
			return true
		}
	}

	return false
}
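
The syncBuffer above is a plain fixed-size circular buffer. A minimal, self-contained sketch of the same overwrite behaviour, with ints standing in for *types.TipSet (illustrative only, not part of the diff):

    package main

    import "fmt"

    // ringBuffer mirrors syncBuffer: Push overwrites the oldest slot once full.
    type ringBuffer struct {
        buf  []int
        next int
    }

    func (rb *ringBuffer) Push(v int) {
        rb.buf[rb.next] = v
        rb.next++
        rb.next %= len(rb.buf)
    }

    func main() {
        rb := &ringBuffer{buf: make([]int, 3)}
        for _, v := range []int{1, 2, 3, 4} {
            rb.Push(v)
        }
        fmt.Println(rb.buf) // [4 2 3]: the oldest entry (1) was overwritten
    }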

// sync buckets and related utilities
type syncBucketSet struct {
	buckets []*syncTargetBucket
}

type syncTargetBucket struct {
	tips []*types.TipSet
}

func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket {
	var stb syncTargetBucket
	for _, ts := range tipsets {
@ -250,10 +622,6 @@ func (sbs *syncBucketSet) Empty() bool {
	return len(sbs.buckets) == 0
}

type syncTargetBucket struct {
	tips []*types.TipSet
}

func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
	for _, t := range stb.tips {
		if ts.Equals(t) {
@ -265,19 +633,43 @@ func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
		if ts.Parents() == t.Key() {
			return true
		}
		if coalesceForksParents && ts.Parents() == t.Parents() {
			return true
		}
	}
	return false
}

func (stb *syncTargetBucket) add(ts *types.TipSet) {

	for _, t := range stb.tips {
	for i, t := range stb.tips {
		if t.Equals(ts) {
			return
		}
		if coalesceTipsets && t.Height() == ts.Height() &&
			types.CidArrsEqual(t.Blocks()[0].Parents, ts.Blocks()[0].Parents) {
			miners := make(map[address.Address]struct{})
			newTs := []*types.BlockHeader{}
			for _, b := range t.Blocks() {
				_, have := miners[b.Miner]
				if !have {
					newTs = append(newTs, b)
					miners[b.Miner] = struct{}{}
				}
			}
			for _, b := range ts.Blocks() {
				_, have := miners[b.Miner]
				if !have {
					newTs = append(newTs, b)
					miners[b.Miner] = struct{}{}
				}
			}

			ts2, err := types.NewTipSet(newTs)
			if err != nil {
				log.Warnf("error while trying to recombine a tipset in a bucket: %+v", err)
				continue
			}
			stb.tips[i] = ts2
			return
		}

	}

	stb.tips = append(stb.tips, ts)
@ -296,196 +688,3 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
	}
	return best
}

func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
	var buckets syncBucketSet

	var peerHeads []*types.TipSet
	for _, ts := range sm.peerHeads {
		peerHeads = append(peerHeads, ts)
	}
	sort.Slice(peerHeads, func(i, j int) bool {
		return peerHeads[i].Height() < peerHeads[j].Height()
	})

	for _, ts := range peerHeads {
		buckets.Insert(ts)
	}

	if len(buckets.buckets) > 1 {
		log.Warn("caution, multiple distinct chains seen during head selections")
		// TODO: we *could* refuse to sync here without user intervention.
		// For now, just select the best cluster
	}

	return buckets.Heaviest(), nil
}

func (sm *syncManager) syncScheduler() {
	for {
		select {
		case ts, ok := <-sm.incomingTipSets:
			if !ok {
				log.Info("shutting down sync scheduler")
				return
			}

			sm.scheduleIncoming(ts)
		case res := <-sm.syncResults:
			sm.scheduleProcessResult(res)
		case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet():
			sm.scheduleWorkSent()
		case <-sm.stop:
			log.Info("sync scheduler shutting down")
			return
		}
	}
}

func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
	log.Debug("scheduling incoming tipset sync: ", ts.Cids())
	if sm.getBootstrapState() == BSStateSelected {
		sm.setBootstrapState(BSStateScheduled)
		sm.syncTargets <- ts
		return
	}

	var relatedToActiveSync bool
	for _, acts := range sm.activeSyncs {
		if ts.Equals(acts) {
			// ignore, we are already syncing it
			return
		}

		if ts.Parents() == acts.Key() {
			// sync this next, after that sync process finishes
			relatedToActiveSync = true
		}
	}

	if !relatedToActiveSync && sm.activeSyncTips.RelatedToAny(ts) {
		relatedToActiveSync = true
	}

	// if this is related to an active sync process, immediately bucket it
	// we don't want to start a parallel sync process that duplicates work
	if relatedToActiveSync {
		sm.activeSyncTips.Insert(ts)
		return
	}

	if sm.getBootstrapState() == BSStateScheduled {
		sm.syncQueue.Insert(ts)
		return
	}

	if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) {
		sm.nextSyncTarget.add(ts)
	} else {
		sm.syncQueue.Insert(ts)

		if sm.nextSyncTarget == nil {
			sm.nextSyncTarget = sm.syncQueue.Pop()
			sm.workerChan = sm.syncTargets
		}
	}
}

func (sm *syncManager) scheduleProcessResult(res *syncResult) {
	if res.success && sm.getBootstrapState() != BSStateComplete {
		sm.setBootstrapState(BSStateComplete)
	}

	delete(sm.activeSyncs, res.ts.Key())
	relbucket := sm.activeSyncTips.PopRelated(res.ts)
	if relbucket != nil {
		if res.success {
			if sm.nextSyncTarget == nil {
				sm.nextSyncTarget = relbucket
				sm.workerChan = sm.syncTargets
			} else {
				for _, t := range relbucket.tips {
					sm.syncQueue.Insert(t)
				}
			}
			return
		}
		// TODO: this is the case where we try to sync a chain, and
		// fail, and we have more blocks on top of that chain that
		// have come in since. The question is, should we try to
		// sync these? or just drop them?
		log.Error("failed to sync chain but have new unconnected blocks from chain")
	}

	if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {
		next := sm.syncQueue.Pop()
		if next != nil {
			sm.nextSyncTarget = next
			sm.workerChan = sm.syncTargets
		}
	}
}

func (sm *syncManager) scheduleWorkSent() {
	hts := sm.nextSyncTarget.heaviestTipSet()
	sm.activeSyncs[hts.Key()] = hts

	if !sm.syncQueue.Empty() {
		sm.nextSyncTarget = sm.syncQueue.Pop()
	} else {
		sm.nextSyncTarget = nil
		sm.workerChan = nil
	}
}

func (sm *syncManager) syncWorker(id int) {
	ss := sm.syncStates[id]
	for {
		select {
		case ts, ok := <-sm.syncTargets:
			if !ok {
				log.Info("sync manager worker shutting down")
				return
			}

			ctx := context.WithValue(context.TODO(), syncStateKey{}, ss)
			err := sm.doSync(ctx, ts)
			if err != nil {
				log.Errorf("sync error: %+v", err)
			}

			sm.syncResults <- &syncResult{
				ts: ts,
				success: err == nil,
			}
		}
	}
}

func (sm *syncManager) syncedPeerCount() int {
	var count int
	for _, ts := range sm.peerHeads {
		if ts.Height() > 0 {
			count++
		}
	}
	return count
}

func (sm *syncManager) getBootstrapState() int {
	sm.bssLk.Lock()
	defer sm.bssLk.Unlock()
	return sm.bootstrapState
}

func (sm *syncManager) setBootstrapState(v int) {
	sm.bssLk.Lock()
	defer sm.bssLk.Unlock()
	sm.bootstrapState = v
}

func (sm *syncManager) IsBootstrapped() bool {
	sm.bssLk.Lock()
	defer sm.bssLk.Unlock()
	return sm.bootstrapState == BSStateComplete
}

@ -10,6 +10,10 @@ import (
	"github.com/filecoin-project/lotus/chain/types/mock"
)

func init() {
	BootstrapPeerThreshold = 1
}

var genTs = mock.TipSet(mock.MkBlock(nil, 0, 0))

type syncOp struct {
@ -28,7 +32,12 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
		<-ch
		return nil
	}).(*syncManager)
	sm.bspThresh = thresh

	oldBootstrapPeerThreshold := BootstrapPeerThreshold
	BootstrapPeerThreshold = thresh
	defer func() {
		BootstrapPeerThreshold = oldBootstrapPeerThreshold
	}()

	sm.Start()
	defer sm.Stop()
@ -87,49 +96,59 @@ func TestSyncManagerEdgeCase(t *testing.T) {

	runSyncMgrTest(t, "edgeCase", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
		sm.SetPeerHead(ctx, "peer1", a)
		assertGetSyncOp(t, stc, a)

		sm.SetPeerHead(ctx, "peer1", b1)
		sm.SetPeerHead(ctx, "peer1", b2)
		// b1 and b2 are being processed

		b1op := <-stc
		b2op := <-stc
		if !b1op.ts.Equals(b1) {
			b1op, b2op = b2op, b1op
		assertGetSyncOp(t, stc, a)

		// b1 and b2 are in queue after a; the sync manager should pick the heaviest one which is b2
		bop := <-stc
		if !bop.ts.Equals(b2) {
			t.Fatalf("Expected tipset %s to sync, but got %s", b2, bop.ts)
		}

		sm.SetPeerHead(ctx, "peer2", c2) // c2 is put into activeSyncTips at index 0
		sm.SetPeerHead(ctx, "peer2", c1) // c1 is put into activeSyncTips at index 1
		sm.SetPeerHead(ctx, "peer3", b2) // b2 is related to c2 and even though it is actively synced it is put into activeSyncTips index 0
		sm.SetPeerHead(ctx, "peer1", a) // a is related to b2 and is put into activeSyncTips index 0
		sm.SetPeerHead(ctx, "peer2", c2)
		sm.SetPeerHead(ctx, "peer2", c1)
		sm.SetPeerHead(ctx, "peer3", b2)
		sm.SetPeerHead(ctx, "peer1", a)

		b1op.done() // b1 completes first, is related to a, so it pops activeSyncTips index 0
		// even though correct one is index 1
		bop.done()

		b2op.done()
		// b2 completes and is not related to c1, so it leaves activeSyncTips as it is
		// get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2)
		bop = <-stc
		if !bop.ts.Equals(c1) {
			t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts)
		}

		waitUntilAllWorkersAreDone(stc)
		sm.SetPeerHead(ctx, "peer4", d1)
		sm.SetPeerHead(ctx, "peer5", e1)
		bop.done()

		if len(sm.activeSyncTips.buckets) != 0 {
			t.Errorf("activeSyncTips expected empty but got: %s", sm.activeSyncTips.String())
		// get the last sync target; it should be e1
		var last *types.TipSet
		for i := 0; i < 10; {
			select {
			case bop = <-stc:
				bop.done()
				if last == nil || bop.ts.Height() > last.Height() {
					last = bop.ts
				}
			default:
				i++
				time.Sleep(10 * time.Millisecond)
			}
		}
		if !last.Equals(e1) {
			t.Fatalf("Expected tipset %s to sync, but got %s", e1, last)
		}

		if len(sm.state) != 0 {
			t.Errorf("active syncs expected empty but got: %d", len(sm.state))
		}
	})
}

func waitUntilAllWorkersAreDone(stc chan *syncOp) {
	for i := 0; i < 10; {
		select {
		case so := <-stc:
			so.done()
		default:
			i++
			time.Sleep(10 * time.Millisecond)
		}
	}
}

func TestSyncManager(t *testing.T) {
	ctx := context.Background()

@ -12,13 +12,14 @@ import (
)

type SyncerStateSnapshot struct {
	Target *types.TipSet
	Base *types.TipSet
	Stage api.SyncStateStage
	Height abi.ChainEpoch
	Message string
	Start time.Time
	End time.Time
	WorkerID uint64
	Target *types.TipSet
	Base *types.TipSet
	Stage api.SyncStateStage
	Height abi.ChainEpoch
	Message string
	Start time.Time
	End time.Time
}

type SyncerState struct {

@ -61,6 +61,10 @@ func ParseFIL(s string) (FIL, error) {
		}
	}

	if len(s) > 50 {
		return FIL{}, fmt.Errorf("string length too large: %d", len(s))
	}

	r, ok := new(big.Rat).SetString(s)
	if !ok {
		return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s)

@ -5,6 +5,8 @@ import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/build"
@ -144,7 +146,7 @@ func (m *Message) EqualCall(o *Message) bool {
	return (&m1).Equals(&m2)
}

func (m *Message) ValidForBlockInclusion(minGas int64) error {
func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error {
	if m.Version != 0 {
		return xerrors.New("'Version' unsupported")
	}
@ -153,6 +155,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error {
		return xerrors.New("'To' address cannot be empty")
	}

	if m.To == build.ZeroAddress && version >= network.Version7 {
		return xerrors.New("invalid 'To' address")
	}

	if m.From == address.Undef {
		return xerrors.New("'From' address cannot be empty")
	}
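
The signature change to ValidForBlockInclusion means callers now have to thread the active network version through. A hedged call-site sketch (msg and nv are hypothetical locals, not taken from this diff):

    // nv is the network.Version at the epoch being validated
    if err := msg.ValidForBlockInclusion(0, nv); err != nil {
        return xerrors.Errorf("message not valid for block inclusion: %w", err)
    }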

@ -3,21 +3,17 @@ package vm
import (
	"fmt"

	vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
	"github.com/filecoin-project/lotus/build"

	"github.com/filecoin-project/go-address"
	addr "github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
	"github.com/ipfs/go-cid"
)

const (
	GasStorageMulti = 1000
	GasComputeMulti = 1
)

type GasCharge struct {
	Name string
	Extra interface{}
@ -30,7 +26,7 @@ type GasCharge struct {
}

func (g GasCharge) Total() int64 {
	return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti
	return g.ComputeGas + g.StorageGas
}
func (g GasCharge) WithVirtual(compute, storage int64) GasCharge {
	out := g
@ -85,6 +81,9 @@ type Pricelist interface {

var prices = map[abi.ChainEpoch]Pricelist{
	abi.ChainEpoch(0): &pricelistV0{
		computeGasMulti: 1,
		storageGasMulti: 1000,

		onChainMessageComputeBase: 38863,
		onChainMessageStorageBase: 36,
		onChainMessageStoragePerByte: 1,
@ -129,6 +128,54 @@ var prices = map[abi.ChainEpoch]Pricelist{
		verifyPostDiscount: true,
		verifyConsensusFault: 495422,
	},
	abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
		computeGasMulti: 1,
		storageGasMulti: 1300,

		onChainMessageComputeBase: 38863,
		onChainMessageStorageBase: 36,
		onChainMessageStoragePerByte: 1,

		onChainReturnValuePerByte: 1,

		sendBase: 29233,
		sendTransferFunds: 27500,
		sendTransferOnlyPremium: 159672,
		sendInvokeMethod: -5377,

		ipldGetBase: 114617,
		ipldPutBase: 353640,
		ipldPutPerByte: 1,

		createActorCompute: 1108454,
		createActorStorage: 36 + 40,
		deleteActor: -(36 + 40), // -createActorStorage

		verifySignature: map[crypto.SigType]int64{
			crypto.SigTypeBLS: 16598605,
			crypto.SigTypeSecp256k1: 1637292,
		},

		hashingBase: 31355,
		computeUnsealedSectorCidBase: 98647,
		verifySealBase: 2000, // TODO gas: placeholder; the VerifySeal syscall is not used
		verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
			abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
				flat: 117680921,
				scale: 43780,
			},
			abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
				flat: 117680921,
				scale: 43780,
			},
			abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
				flat: 117680921,
				scale: 43780,
			},
		},
		verifyPostDiscount: false,
		verifyConsensusFault: 495422,
	},
}
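
To make the pricing change concrete: storage gas is now pre-multiplied by storageGasMulti inside the pricelist, which is why GasCharge.Total() above becomes a plain sum. A self-contained arithmetic sketch using the Calico values (the 100-byte message size is just an example):

    package main

    import "fmt"

    func main() {
        const (
            computeBase    = 38863 // onChainMessageComputeBase
            storageBase    = 36    // onChainMessageStorageBase
            storagePerByte = 1     // onChainMessageStoragePerByte
            storageMulti   = 1300  // storageGasMulti at Calico
            msgSize        = 100   // example message size
        )
        storageGas := (storageBase + storagePerByte*msgSize) * storageMulti
        fmt.Println(computeBase + storageGas) // 215663: Total() is now compute + storage
    }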

// PricelistByEpoch finds the latest prices for the given epoch

@ -18,6 +18,8 @@ type scalingCost struct {
}

type pricelistV0 struct {
	computeGasMulti int64
	storageGasMulti int64
	///////////////////////////////////////////////////////////////////////////
	// System operations
	///////////////////////////////////////////////////////////////////////////
@ -99,12 +101,12 @@ var _ Pricelist = (*pricelistV0)(nil)
// OnChainMessage returns the gas used for storing a message of a given size in the chain.
func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge {
	return newGasCharge("OnChainMessage", pl.onChainMessageComputeBase,
		pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))
		(pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))*pl.storageGasMulti)
}

// OnChainReturnValue returns the gas used for storing the response of a message in the chain.
func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge {
	return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte)
	return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte*pl.storageGasMulti)
}

// OnMethodInvocation returns the gas used when invoking a method.
@ -131,23 +133,23 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M

// OnIpldGet returns the gas used for retrieving an object
func (pl *pricelistV0) OnIpldGet() GasCharge {
	return newGasCharge("OnIpldGet", pl.ipldGetBase, 0)
	return newGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0)
}

// OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
	return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).
		WithExtra(dataSize)
	return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti).
		WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300)
}

// OnCreateActor returns the gas used for creating an actor
func (pl *pricelistV0) OnCreateActor() GasCharge {
	return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage)
	return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage*pl.storageGasMulti)
}

// OnDeleteActor returns the gas used for deleting an actor
func (pl *pricelistV0) OnDeleteActor() GasCharge {
	return newGasCharge("OnDeleteActor", 0, pl.deleteActor)
	return newGasCharge("OnDeleteActor", 0, pl.deleteActor*pl.storageGasMulti)
}

// OnVerifySignature
@ -207,6 +209,7 @@ func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge
	}

	return newGasCharge("OnVerifyPost", gasUsed, 0).
		WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0).
		WithExtra(map[string]interface{}{
			"type": sectorSize,
			"size": len(info.ChallengedSectors),

@ -6,6 +6,8 @@ import (
	"fmt"
	"reflect"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin"

	"github.com/ipfs/go-cid"
@ -173,9 +175,14 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
	paramT := meth.Type().In(1).Elem()
	param := reflect.New(paramT)

	rt := in[0].Interface().(*Runtime)
	inBytes := in[1].Interface().([]byte)
	if err := DecodeParams(inBytes, param.Interface()); err != nil {
		aerr := aerrors.Absorb(err, 1, "failed to decode parameters")
		ec := exitcode.ErrSerialization
		if rt.NetworkVersion() < network.Version7 {
			ec = 1
		}
		aerr := aerrors.Absorb(err, ec, "failed to decode parameters")
		return []reflect.Value{
			reflect.ValueOf([]byte{}),
			// Below is a hack, fixed in Go 1.13
@ -183,7 +190,6 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
			reflect.ValueOf(&aerr).Elem(),
		}
	}
	rt := in[0].Interface().(*Runtime)
	rval, aerror := rt.shimCall(func() interface{} {
		ret := meth.Call([]reflect.Value{
			reflect.ValueOf(rt),

@ -1,10 +1,13 @@
package vm

import (
	"context"
	"fmt"
	"io"
	"testing"

	"github.com/filecoin-project/go-state-types/network"

	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/stretchr/testify/assert"
	cbg "github.com/whyrusleeping/cbor-gen"
@ -105,10 +108,27 @@ func TestInvokerBasic(t *testing.T) {
	}
}

	_, aerr := code[1](&Runtime{}, []byte{99})
	if aerrors.IsFatal(aerr) {
		t.Fatal("err should not be fatal")
	{
		_, aerr := code[1](&Runtime{
			vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
				return network.Version0
			}},
		}, []byte{99})
		if aerrors.IsFatal(aerr) {
			t.Fatal("err should not be fatal")
		}
		assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")
	}
	assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")

	{
		_, aerr := code[1](&Runtime{
			vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
				return network.Version7
			}},
		}, []byte{99})
		if aerrors.IsFatal(aerr) {
			t.Fatal("err should not be fatal")
		}
		assert.Equal(t, exitcode.ErrSerialization, aerrors.RetCode(aerr), "return code should be %s", 1)
	}
}

@ -244,20 +244,23 @@ func (rt *Runtime) NewActorAddress() address.Address {
	return addr
}

func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) {
	if addr == address.Undef && rt.NetworkVersion() >= network.Version7 {
		rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address")
	}
	act, aerr := rt.vm.areg.Create(codeID, rt)
	if aerr != nil {
		rt.Abortf(aerr.RetCode(), aerr.Error())
	}

	_, err := rt.state.GetActor(address)
	_, err := rt.state.GetActor(addr)
	if err == nil {
		rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
	}

	rt.chargeGas(rt.Pricelist().OnCreateActor())

	err = rt.state.SetActor(address, act)
	err = rt.state.SetActor(addr, act)
	if err != nil {
		panic(aerrors.Fatalf("creating actor entry: %v", err))
	}
@ -266,7 +269,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {

// DeleteActor deletes the executing actor from the state tree, transferring
// any balance to beneficiary.
// Aborts if the beneficiary does not exist.
// Aborts if the beneficiary does not exist or is the calling actor.
// May only be called by the actor itself.
func (rt *Runtime) DeleteActor(beneficiary address.Address) {
	rt.chargeGas(rt.Pricelist().OnDeleteActor())
@ -278,6 +281,19 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
		panic(aerrors.Fatalf("failed to get actor: %s", err))
	}
	if !act.Balance.IsZero() {
		// TODO: Should be safe to drop the version-check,
		// since only the paych actor called this pre-version 7, but let's leave it for now
		if rt.NetworkVersion() >= network.Version7 {
			beneficiaryId, found := rt.ResolveAddress(beneficiary)
			if !found {
				rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist")
			}

			if beneficiaryId == rt.Receiver() {
				rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary")
			}
		}

		// Transfer the executing actor's balance to the beneficiary
		if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
			panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
@ -533,12 +549,19 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
		ComputeGas: gas.ComputeGas,
		StorageGas: gas.StorageGas,

		TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
		VirtualComputeGas: gas.VirtualCompute,
		VirtualStorageGas: gas.VirtualStorage,

		Callers: callers[:cout],
	}
	if gasTrace.VirtualStorageGas == 0 {
		gasTrace.VirtualStorageGas = gasTrace.StorageGas
	}
	if gasTrace.VirtualComputeGas == 0 {
		gasTrace.VirtualComputeGas = gasTrace.ComputeGas
	}
	gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas

	rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
	rt.lastGasChargeTime = now
	rt.lastGasCharge = &gasTrace
@ -546,9 +569,10 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError

	// overflow safe
	if rt.gasUsed > rt.gasAvailable-toUse {
		gasUsed := rt.gasUsed
		rt.gasUsed = rt.gasAvailable
		return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d",
			rt.gasUsed, rt.gasAvailable)
		return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d, use=%d",
			gasUsed, rt.gasAvailable, toUse)
	}
	rt.gasUsed += toUse
	return nil
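
The rewritten out-of-gas branch keeps the guard in its overflow-safe form. A self-contained sketch of why `used > available-toUse` is preferred over `used+toUse > available` (values chosen to force int64 wraparound):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        used, available, toUse := int64(1), int64(10), int64(math.MaxInt64)
        fmt.Println(used+toUse > available) // false: the addition wrapped around
        fmt.Println(used > available-toUse) // true: correctly reports out of gas
    }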

@ -7,6 +7,10 @@ import (
	goruntime "runtime"
	"sync"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/policy"

	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
@ -40,7 +44,9 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
	return func(ctx context.Context, rt *Runtime) runtime2.Syscalls {

		return &syscallShim{
			ctx: ctx,
			ctx: ctx,
			epoch: rt.CurrEpoch(),
			networkVersion: rt.NetworkVersion(),

			actor: rt.Receiver(),
			cstate: rt.state,
@ -55,11 +61,13 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
type syscallShim struct {
	ctx context.Context

	lbState LookbackStateGetter
	actor address.Address
	cstate *state.StateTree
	cst cbor.IpldStore
	verifier ffiwrapper.Verifier
	epoch abi.ChainEpoch
	networkVersion network.Version
	lbState LookbackStateGetter
	actor address.Address
	cstate *state.StateTree
	cst cbor.IpldStore
	verifier ffiwrapper.Verifier
}

func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
@ -202,6 +210,10 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
}

func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) {
	if ss.networkVersion >= network.Version7 && height < ss.epoch-policy.ChainFinality {
		return address.Undef, xerrors.Errorf("cannot get worker key (currEpoch %d, height %d)", ss.epoch, height)
	}

	lbState, err := ss.lbState(ss.ctx, height)
	if err != nil {
		return address.Undef, err

@ -9,6 +9,7 @@ import (
	"time"

	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/metrics"

	block "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
@ -16,6 +17,7 @@ import (
	logging "github.com/ipfs/go-log/v2"
	mh "github.com/multiformats/go-multihash"
	cbg "github.com/whyrusleeping/cbor-gen"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
	"golang.org/x/xerrors"

@ -70,6 +72,7 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad
}

var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
var _ blockstore.Viewer = (*gasChargingBlocks)(nil)

type gasChargingBlocks struct {
	chargeGas func(GasCharge)
@ -77,6 +80,24 @@ type gasChargingBlocks struct {
	under cbor.IpldBlockstore
}

func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error {
	if v, ok := bs.under.(blockstore.Viewer); ok {
		bs.chargeGas(bs.pricelist.OnIpldGet())
		return v.View(c, func(b []byte) error {
			// we have successfully retrieved the value; charge for it, even if the user-provided function fails.
			bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b)))
			bs.chargeGas(gasOnActorExec)
			return cb(b)
		})
	}
	// the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour.
	blk, err := bs.Get(c)
	if err == nil && blk != nil {
		return cb(blk.RawData())
	}
	return err
}

func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) {
	bs.chargeGas(bs.pricelist.OnIpldGet())
	blk, err := bs.under.Get(c)
@ -119,6 +140,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
}

	if parent != nil {
		// TODO: The version check here should be unnecessary, but we can wait to take it out
		if !parent.allowInternal && rt.NetworkVersion() >= network.Version7 {
			rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled")
		}
		rt.gasUsed = parent.gasUsed
		rt.origin = parent.origin
		rt.originNonce = parent.originNonce
@ -130,10 +155,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
		rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth")
	}

	rt.cst = &cbor.BasicIpldStore{
		Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
		Atlas: vm.cst.Atlas,
	}
	cbb := &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}
	cst := cbor.NewCborStore(cbb)
	cst.Atlas = vm.cst.Atlas // associate the atlas.
	rt.cst = cst

	vmm := *msg
	resF, ok := rt.ResolveAddress(msg.From)
@ -583,6 +608,8 @@ func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorErr
	return act.Balance, nil
}

type vmFlushKey struct{}

func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
	_, span := trace.StartSpan(ctx, "vm.Flush")
	defer span.End()
@ -595,7 +622,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
		return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
	}

	if err := Copy(ctx, from, to, root); err != nil {
	if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil {
		return cid.Undef, xerrors.Errorf("copying tree: %w", err)
	}

@ -652,21 +679,48 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error {
func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error {
	ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint
	defer span.End()
	start := time.Now()

	var numBlocks int
	var totalCopySize int

	var batch []block.Block
	const batchSize = 128
	const bufCount = 3
	freeBufs := make(chan []block.Block, bufCount)
	toFlush := make(chan []block.Block, bufCount)
	for i := 0; i < bufCount; i++ {
		freeBufs <- make([]block.Block, 0, batchSize)
	}

	errFlushChan := make(chan error)

	go func() {
		for b := range toFlush {
			if err := to.PutMany(b); err != nil {
				close(freeBufs)
				errFlushChan <- xerrors.Errorf("batch put in copy: %w", err)
				return
			}
			freeBufs <- b[:0]
		}
		close(errFlushChan)
		close(freeBufs)
	}()

	var batch = <-freeBufs
	batchCp := func(blk block.Block) error {
		numBlocks++
		totalCopySize += len(blk.RawData())

		batch = append(batch, blk)
		if len(batch) > 100 {
			if err := to.PutMany(batch); err != nil {
				return xerrors.Errorf("batch put in copy: %w", err)

		if len(batch) >= batchSize {
			toFlush <- batch
			var ok bool
			batch, ok = <-freeBufs
			if !ok {
				return <-errFlushChan
			}
			batch = batch[:0]
		}
		return nil
	}
@ -676,15 +730,22 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err
	}

	if len(batch) > 0 {
		if err := to.PutMany(batch); err != nil {
			return xerrors.Errorf("batch put in copy: %w", err)
		}
		toFlush <- batch
	}
	close(toFlush)        // close toFlush, telling the flush loop to exit once drained
	err := <-errFlushChan // get the error, or nil if the channel was closed cleanly
	if err != nil {
		return err
	}

	span.AddAttributes(
		trace.Int64Attribute("numBlocks", int64(numBlocks)),
		trace.Int64Attribute("copySize", int64(totalCopySize)),
	)
	if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok {
		took := metrics.SinceInMilliseconds(start)
		stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took))
	}

	return nil
}
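
The new Copy overlaps tree traversal with writes by cycling a small pool of batch buffers through one writer goroutine. A minimal, self-contained sketch of the same pattern, with ints standing in for blocks and put standing in for to.PutMany (names are illustrative; the error path is simplified):

    package main

    import "fmt"

    func pipelinedCopy(items []int, put func([]int) error) error {
        const batchSize, bufCount = 4, 3
        freeBufs := make(chan []int, bufCount)
        toFlush := make(chan []int, bufCount)
        for i := 0; i < bufCount; i++ {
            freeBufs <- make([]int, 0, batchSize)
        }

        errc := make(chan error)
        go func() {
            for b := range toFlush {
                if err := put(b); err != nil {
                    close(freeBufs) // unblock the producer
                    errc <- err
                    return
                }
                freeBufs <- b[:0] // recycle the buffer
            }
            close(errc)
        }()

        batch := <-freeBufs
        for _, it := range items {
            batch = append(batch, it)
            if len(batch) >= batchSize {
                toFlush <- batch
                var ok bool
                if batch, ok = <-freeBufs; !ok {
                    return <-errc
                }
            }
        }
        if len(batch) > 0 {
            toFlush <- batch
        }
        close(toFlush)
        return <-errc
    }

    func main() {
        err := pipelinedCopy([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}, func(b []int) error {
            fmt.Println("flushing", b) // writes happen concurrently with batching
            return nil
        })
        fmt.Println("err:", err)
    }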

@ -90,7 +90,7 @@ func (m MultiWallet) WalletHas(ctx context.Context, address address.Address) (bo
}

func (m MultiWallet) WalletList(ctx context.Context) ([]address.Address, error) {
	var out []address.Address
	out := make([]address.Address, 0)
	seen := map[address.Address]struct{}{}

	ws := nonNil(m.Remote, m.Ledger, m.Local)

@ -305,6 +305,18 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er

	delete(w.keys, addr)

	def, err := w.GetDefault()
	if err != nil {
		return xerrors.Errorf("getting default address: %w", err)
	}

	if def == addr {
		err = w.SetDefault(address.Undef)
		if err != nil {
			return xerrors.Errorf("unsetting default address: %w", err)
		}
	}

	return nil
}

30 cli/chain.go
@ -3,6 +3,7 @@ package cli
import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
@ -1246,14 +1247,19 @@ var chainDecodeCmd = &cli.Command{
}

var chainDecodeParamsCmd = &cli.Command{
	Name: "params",
	Usage: "Decode message params",
	Name: "params",
	Usage: "Decode message params",
	ArgsUsage: "[toAddr method params]",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name: "tipset",
		},
		&cli.StringFlag{
			Name: "encoding",
			Value: "base64",
			Usage: "specify input encoding to parse",
		},
	},
	ArgsUsage: "[toAddr method hexParams]",
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		if err != nil {
@ -1276,11 +1282,21 @@ var chainDecodeParamsCmd = &cli.Command{
			return xerrors.Errorf("parsing method id: %w", err)
		}

		params, err := hex.DecodeString(cctx.Args().Get(2))
		if err != nil {
			return xerrors.Errorf("parsing hex params: %w", err)
		var params []byte
		switch cctx.String("encoding") {
		case "base64":
			params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2))
			if err != nil {
				return xerrors.Errorf("decoding base64 value: %w", err)
			}
		case "hex":
			params, err = hex.DecodeString(cctx.Args().Get(2))
			if err != nil {
				return xerrors.Errorf("decoding hex value: %w", err)
			}
		default:
			return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
		}

		ts, err := LoadTipSet(ctx, cctx, api)
		if err != nil {
			return err
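
A hedged usage sketch for the reworked decoder above (the address, method number, and payload are placeholders, not values from this diff):

    lotus chain decode params --encoding=base64 <toAddr> <method> <base64-params>
    lotus chain decode params --encoding=hex <toAddr> <method> <hex-params>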

@ -7,6 +7,7 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"math/rand"
	"os"
	"path/filepath"
@ -81,6 +82,7 @@ var clientCmd = &cli.Command{
	WithCategory("storage", clientListDeals),
	WithCategory("storage", clientGetDealCmd),
	WithCategory("storage", clientListAsksCmd),
	WithCategory("storage", clientDealStatsCmd),
	WithCategory("data", clientImportCmd),
	WithCategory("data", clientDropCmd),
	WithCategory("data", clientLocalCmd),
@ -1112,6 +1114,80 @@ var clientRetrieveCmd = &cli.Command{
	},
}

var clientDealStatsCmd = &cli.Command{
	Name: "deal-stats",
	Usage: "Print statistics about local storage deals",
	Flags: []cli.Flag{
		&cli.DurationFlag{
			Name: "newer-than",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := ReqContext(cctx)

		localDeals, err := api.ClientListDeals(ctx)
		if err != nil {
			return err
		}

		var totalSize uint64
		byState := map[storagemarket.StorageDealStatus][]uint64{}
		for _, deal := range localDeals {
			if cctx.IsSet("newer-than") {
				if time.Now().Sub(deal.CreationTime) > cctx.Duration("newer-than") {
					continue
				}
			}

			totalSize += deal.Size
			byState[deal.State] = append(byState[deal.State], deal.Size)
		}

		fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))

		type stateStat struct {
			state storagemarket.StorageDealStatus
			count int
			bytes uint64
		}

		stateStats := make([]stateStat, 0, len(byState))
		for state, deals := range byState {
			if state == storagemarket.StorageDealActive {
				state = math.MaxUint64 // for sort
			}

			st := stateStat{
				state: state,
				count: len(deals),
			}
			for _, b := range deals {
				st.bytes += b
			}

			stateStats = append(stateStats, st)
		}

		sort.Slice(stateStats, func(i, j int) bool {
			return int64(stateStats[i].state) < int64(stateStats[j].state)
		})

		for _, st := range stateStats {
			if st.state == math.MaxUint64 {
				st.state = storagemarket.StorageDealActive
			}
			fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
		}

		return nil
	},
}
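
A hedged usage sketch for the new command (the duration value is only an example):

    lotus client deal-stats
    lotus client deal-stats --newer-than 24h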
|
||||
|
||||
var clientListAsksCmd = &cli.Command{
|
||||
Name: "list-asks",
|
||||
Usage: "List asks for top miners",
|
||||
|
@ -119,7 +119,7 @@ var stateMinerInfo = &cli.Command{
|
||||
}
|
||||
|
||||
fmt.Printf("PeerID:\t%s\n", mi.PeerId)
|
||||
fmt.Printf("Multiaddrs: \t")
|
||||
fmt.Printf("Multiaddrs:\t")
|
||||
for _, addr := range mi.Multiaddrs {
|
||||
a, err := multiaddr.NewMultiaddrBytes(addr)
|
||||
if err != nil {
|
||||
@ -127,6 +127,7 @@ var stateMinerInfo = &cli.Command{
|
||||
}
|
||||
fmt.Printf("%s ", a)
|
||||
}
|
||||
fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed)
|
||||
|
||||
fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
|
||||
pow, err := api.StateMinerPower(ctx, addr, ts.Key())
|
||||
|
18
cli/sync.go
18
cli/sync.go
@ -45,8 +45,8 @@ var syncStatusCmd = &cli.Command{
|
||||
}
|
||||
|
||||
fmt.Println("sync status:")
|
||||
for i, ss := range state.ActiveSyncs {
|
||||
fmt.Printf("worker %d:\n", i)
|
||||
for _, ss := range state.ActiveSyncs {
|
||||
fmt.Printf("worker %d:\n", ss.WorkerID)
|
||||
var base, target []cid.Cid
|
||||
var heightDiff int64
|
||||
var theight abi.ChainEpoch
|
||||
@ -263,12 +263,17 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(state.ActiveSyncs) == 0 {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
head, err := napi.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
working := 0
|
||||
working := -1
|
||||
for i, ss := range state.ActiveSyncs {
|
||||
switch ss.Stage {
|
||||
case api.StageSyncComplete:
|
||||
@ -279,7 +284,12 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
|
||||
}
|
||||
}
|
||||
|
||||
if working == -1 {
|
||||
working = len(state.ActiveSyncs) - 1
|
||||
}
|
||||
|
||||
ss := state.ActiveSyncs[working]
|
||||
workerID := ss.WorkerID
|
||||
|
||||
var baseHeight abi.ChainEpoch
|
||||
var target []cid.Cid
|
||||
@ -302,7 +312,7 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
|
||||
fmt.Print("\r\x1b[2K\x1b[A")
|
||||
}
|
||||
|
||||
fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", working, baseHeight, theight, heightDiff)
|
||||
fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff)
|
||||
fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height)
|
||||
lastLines = 2
|
||||
|
||||
|
@ -32,6 +32,7 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
|
||||
// Start mining blocks
|
||||
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
|
||||
bm.MineBlocks()
|
||||
t.Cleanup(bm.Stop)
|
||||
|
||||
// Get the full node's wallet address
|
||||
fullAddr, err := full.WalletDefaultAddress(ctx)
|
||||
@ -67,6 +68,7 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur
|
||||
// Start mining blocks
|
||||
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
|
||||
bm.MineBlocks()
|
||||
t.Cleanup(bm.Stop)
|
||||
|
||||
// Send some funds to register the second node
|
||||
fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
|
||||
|
@ -16,21 +16,30 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
ocprom "contrib.go.opencensus.io/exporter/prometheus"
|
||||
"github.com/cockroachdb/pebble"
|
||||
"github.com/cockroachdb/pebble/bloom"
|
||||
"github.com/ipfs/go-cid"
|
||||
metricsi "github.com/ipfs/go-metrics-interface"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger"
|
||||
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
|
||||
metricsprometheus "github.com/ipfs/go-metrics-prometheus"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
metricsprometheus "github.com/ipfs/go-metrics-prometheus"
|
||||
"github.com/ipld/go-car"
|
||||
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
|
||||
bdg "github.com/dgraph-io/badger/v2"
|
||||
@ -56,9 +65,25 @@ var importBenchCmd = &cli.Command{
|
||||
importAnalyzeCmd,
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "start-tipset",
|
||||
Usage: "start validation at the given tipset key; in format cid1,cid2,cid3...",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "end-tipset",
|
||||
Usage: "halt validation at the given tipset key; in format cid1,cid2,cid3...",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "genesis-tipset",
|
||||
Usage: "genesis tipset key; in format cid1,cid2,cid3...",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "height",
|
||||
Usage: "halt validation after given height",
|
||||
Name: "start-height",
|
||||
Usage: "start validation at given height; beware that chain traversal by height is very slow",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "end-height",
|
||||
Usage: "halt validation after given height; beware that chain traversal by height is very slow",
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "batch-seal-verify-threads",
|
||||
@ -86,32 +111,52 @@ var importBenchCmd = &cli.Command{
|
||||
Name: "global-profile",
|
||||
Value: true,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "start-at",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "only-import",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-pebble",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-native-badger",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "car",
|
||||
Usage: "path to CAR file; required for import; on validation, either " +
|
||||
"a CAR path or the --head flag are required",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "head",
|
||||
Usage: "tipset key of the head, useful when benchmarking validation " +
|
||||
"on an existing chain store, where a CAR is not available; " +
|
||||
"if both --car and --head are provided, --head takes precedence " +
|
||||
"over the CAR root; the format is cid1,cid2,cid3...",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
metricsprometheus.Inject() //nolint:errcheck
|
||||
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
|
||||
if !cctx.Args().Present() {
|
||||
fmt.Println("must pass car file of chain to benchmark importing")
|
||||
return nil
|
||||
}
|
||||
|
||||
cfi, err := os.Open(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cfi.Close() //nolint:errcheck // read only file
|
||||
|
||||
go func() {
|
||||
http.Handle("/debug/metrics/prometheus", promhttp.Handler())
|
||||
// Prometheus globals are exposed as interfaces, but the prometheus
|
||||
// OpenCensus exporter expects a concrete *Registry. The concrete type of
|
||||
// the globals are actually *Registry, so we downcast them, staying
|
||||
// defensive in case things change under the hood.
|
||||
registry, ok := prometheus.DefaultRegisterer.(*prometheus.Registry)
|
||||
if !ok {
|
||||
log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", prometheus.DefaultRegisterer)
|
||||
return
|
||||
}
|
||||
exporter, err := ocprom.NewExporter(ocprom.Options{
|
||||
Registry: registry,
|
||||
Namespace: "lotus",
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("could not create the prometheus stats exporter: %v", err)
|
||||
}
|
||||
|
||||
http.Handle("/debug/metrics", exporter)
|
||||
|
||||
http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
|
||||
}()
|
||||
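// The goroutine above serves two endpoints on localhost:6060: the stock
// promhttp handler at /debug/metrics/prometheus, and the OpenCensus view
// of the same registry at /debug/metrics; the deferred collector further
// down scrapes the latter into bench.metrics.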
|
||||
@ -126,17 +171,17 @@ var importBenchCmd = &cli.Command{
|
||||
tdir = tmp
|
||||
}
|
||||
|
||||
bdgOpt := badger.DefaultOptions
|
||||
bdgOpt.GcInterval = 0
|
||||
bdgOpt.Options = bdg.DefaultOptions("")
|
||||
bdgOpt.Options.SyncWrites = false
|
||||
bdgOpt.Options.Truncate = true
|
||||
bdgOpt.Options.DetectConflicts = false
|
||||
var (
|
||||
ds datastore.Batching
|
||||
bs blockstore.Blockstore
|
||||
err error
|
||||
)
|
||||
|
||||
var bds datastore.Batching
|
||||
if cctx.Bool("use-pebble") {
|
||||
switch {
|
||||
case cctx.Bool("use-pebble"):
|
||||
log.Info("using pebble")
|
||||
cache := 512
|
||||
bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
|
||||
ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
|
||||
// Pebble has a single combined cache area and the write
|
||||
// buffers are taken from this too. Assign all available
|
||||
// memory allowance for cache.
|
||||
@ -155,30 +200,53 @@ var importBenchCmd = &cli.Command{
|
||||
},
|
||||
Logger: log,
|
||||
})
|
||||
} else {
|
||||
bds, err = badger.NewDatastore(tdir, &bdgOpt)
|
||||
|
||||
case cctx.Bool("use-native-badger"):
|
||||
log.Info("using native badger")
|
||||
var opts badgerbs.Options
|
||||
if opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, tdir, false); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.SyncWrites = false
|
||||
bs, err = badgerbs.Open(opts)
|
||||
|
||||
default: // legacy badger via datastore.
|
||||
log.Info("using legacy badger")
|
||||
bdgOpt := badger.DefaultOptions
|
||||
bdgOpt.GcInterval = 0
|
||||
bdgOpt.Options = bdg.DefaultOptions("")
|
||||
bdgOpt.Options.SyncWrites = false
|
||||
bdgOpt.Options.Truncate = true
|
||||
bdgOpt.Options.DetectConflicts = false
|
||||
|
||||
ds, err = badger.NewDatastore(tdir, &bdgOpt)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer bds.Close() //nolint:errcheck
|
||||
|
||||
bds = measure.New("dsbench", bds)
|
||||
if ds != nil {
|
||||
ds = measure.New("dsbench", ds)
|
||||
defer ds.Close() //nolint:errcheck
|
||||
bs = blockstore.NewBlockstore(ds)
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(bds)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
defer c.Close() //nolint:errcheck
|
||||
}
|
||||
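// Only some of the blockstores constructed above hold resources that need
// releasing (e.g. the badger-backed one), hence the io.Closer type check
// before deferring Close.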
|
||||
ctx := metricsi.CtxScope(context.Background(), "lotus")
|
||||
cacheOpts := blockstore.DefaultCacheOpts()
|
||||
cacheOpts.HasBloomFilterSize = 0
|
||||
|
||||
cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts)
|
||||
bs, err = blockstore.CachedBlockstore(ctx, bs, cacheOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bs = cbs
|
||||
ds := datastore.NewMapDatastore()
|
||||
|
||||
var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier
|
||||
if cctx.IsSet("syscall-cache") {
|
||||
scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt)
|
||||
scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &badger.DefaultOptions)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opening syscall-cache datastore: %w", err)
|
||||
}
|
||||
@ -193,11 +261,223 @@ var importBenchCmd = &cli.Command{
|
||||
return nil
|
||||
}
|
||||
|
||||
cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier), nil)
|
||||
metadataDs := datastore.NewMapDatastore()
|
||||
cs := store.NewChainStore(bs, bs, metadataDs, vm.Syscalls(verifier), nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
stm := stmgr.NewStateManager(cs)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
// register a gauge that reports how long since the measurable
|
||||
// operation began.
|
||||
promauto.NewGaugeFunc(prometheus.GaugeOpts{
|
||||
Name: "lotus_bench_time_taken_secs",
|
||||
}, func() float64 {
|
||||
return time.Since(startTime).Seconds()
|
||||
})
|
||||
|
||||
defer func() {
|
||||
end := time.Now().Format(time.RFC3339)
|
||||
|
||||
resp, err := http.Get("http://localhost:6060/debug/metrics")
|
||||
if err != nil {
|
||||
log.Warnf("failed to scape prometheus: %s", err)
|
||||
}
|
||||
|
||||
metricsfi, err := os.Create("bench.metrics")
|
||||
if err != nil {
|
||||
log.Warnf("failed to write prometheus data: %s", err)
|
||||
}
|
||||
|
||||
_, _ = io.Copy(metricsfi, resp.Body) //nolint:errcheck
|
||||
_ = metricsfi.Close() //nolint:errcheck
|
||||
|
||||
writeProfile := func(name string) {
|
||||
if file, err := os.Create(fmt.Sprintf("%s.%s.%s.pprof", name, startTime.Format(time.RFC3339), end)); err == nil {
|
||||
if err := pprof.Lookup(name).WriteTo(file, 0); err != nil {
|
||||
log.Warnf("failed to write %s pprof: %s", name, err)
|
||||
}
|
||||
_ = file.Close()
|
||||
} else {
|
||||
log.Warnf("failed to create %s pprof file: %s", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
writeProfile("heap")
|
||||
writeProfile("allocs")
|
||||
}()
|
||||
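// On exit, the defer above snapshots the Prometheus endpoint into
// bench.metrics and writes heap and allocs pprof profiles stamped with
// the run's start and end timestamps.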
|
||||
var carFile *os.File
|
||||
|
||||
// open the CAR file if one is provided.
|
||||
if path := cctx.String("car"); path != "" {
|
||||
var err error
|
||||
if carFile, err = os.Open(path); err != nil {
|
||||
return xerrors.Errorf("failed to open provided CAR file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var head *types.TipSet
|
||||
|
||||
// --- IMPORT ---
|
||||
if !cctx.Bool("no-import") {
|
||||
if cctx.Bool("global-profile") {
|
||||
prof, err := os.Create("bench.import.pprof")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer prof.Close() //nolint:errcheck
|
||||
|
||||
if err := pprof.StartCPUProfile(prof); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// import is NOT suppressed; do it.
|
||||
if carFile == nil { // a CAR is compulsory for the import.
|
||||
return fmt.Errorf("no CAR file provided for import")
|
||||
}
|
||||
|
||||
head, err = cs.Import(carFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
if cctx.Bool("only-import") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- VALIDATION ---
|
||||
//
|
||||
// we are now preparing for the validation benchmark.
|
||||
// a HEAD needs to be set; --head takes precedence over the root
|
||||
// of the CAR, if both are provided.
|
||||
if h := cctx.String("head"); h != "" {
|
||||
cids, err := lcli.ParseTipSetString(h)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse head tipset key: %w", err)
|
||||
}
|
||||
|
||||
head, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if carFile != nil && head == nil {
|
||||
cr, err := car.NewCarReader(carFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if h == "" && carFile == nil {
|
||||
return xerrors.Errorf("neither --car nor --head flags supplied")
|
||||
}
|
||||
|
||||
log.Infof("chain head is tipset: %s", head.Key())
|
||||
|
||||
var genesis *types.TipSet
|
||||
log.Infof("getting genesis block")
|
||||
if tsk := cctx.String("genesis-tipset"); tsk != "" {
|
||||
var cids []cid.Cid
|
||||
if cids, err = lcli.ParseTipSetString(tsk); err != nil {
|
||||
return xerrors.Errorf("failed to parse genesis tipset key: %w", err)
|
||||
}
|
||||
genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
|
||||
} else {
|
||||
log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset")
|
||||
// fallback to the slow path of walking the chain.
|
||||
genesis, err = cs.GetTipsetByHeight(context.TODO(), 0, head, true)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the end tipset, falling back to head if not provided.
|
||||
end := head
|
||||
if tsk := cctx.String("end-tipset"); tsk != "" {
|
||||
var cids []cid.Cid
|
||||
if cids, err = lcli.ParseTipSetString(tsk); err != nil {
|
||||
return xerrors.Errorf("failed to end genesis tipset key: %w", err)
|
||||
}
|
||||
end, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
|
||||
} else if h := cctx.Int64("end-height"); h != 0 {
|
||||
log.Infof("getting end tipset at height %d...", h)
|
||||
end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the start tipset, if provided; otherwise, fallback to
|
||||
// height 1 for a start point.
|
||||
var (
|
||||
startEpoch = abi.ChainEpoch(1)
|
||||
start *types.TipSet
|
||||
)
|
||||
|
||||
if tsk := cctx.String("start-tipset"); tsk != "" {
|
||||
var cids []cid.Cid
|
||||
if cids, err = lcli.ParseTipSetString(tsk); err != nil {
|
||||
return xerrors.Errorf("failed to start genesis tipset key: %w", err)
|
||||
}
|
||||
start, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
|
||||
} else if h := cctx.Int64("start-height"); h != 0 {
|
||||
log.Infof("getting start tipset at height %d...", h)
|
||||
// lookback from the end tipset (which falls back to head if not supplied).
|
||||
start, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), end, true)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if start != nil {
|
||||
startEpoch = start.Height()
|
||||
if err := cs.ForceHeadSilent(context.Background(), start); err != nil {
|
||||
// if err := cs.SetHead(start); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
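// Walk the chain backwards from the end tipset down to the start epoch,
// collecting tipsets in reverse order; the validation loop below replays
// them oldest-first by iterating the slice from the tail.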
inverseChain := append(make([]*types.TipSet, 0, end.Height()), end)
|
||||
for ts := end; ts.Height() > startEpoch; {
|
||||
if h := ts.Height(); h%100 == 0 {
|
||||
log.Infof("walking back the chain; loaded tipset at height %d...", h)
|
||||
}
|
||||
next, err := cs.LoadTipSet(ts.Parents())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
inverseChain = append(inverseChain, next)
|
||||
ts = next
|
||||
}
|
||||
|
||||
var enc *json.Encoder
|
||||
if cctx.Bool("export-traces") {
|
||||
ibj, err := os.Create("bench.json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ibj.Close() //nolint:errcheck
|
||||
|
||||
enc = json.NewEncoder(ibj)
|
||||
}
|
||||
|
||||
if cctx.Bool("global-profile") {
|
||||
prof, err := os.Create("import-bench.prof")
|
||||
prof, err := os.Create("bench.validation.pprof")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -208,84 +488,8 @@ var importBenchCmd = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
var head *types.TipSet
|
||||
if !cctx.Bool("no-import") {
|
||||
head, err = cs.Import(cfi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
cr, err := car.NewCarReader(cfi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if cctx.Bool("only-import") {
|
||||
return nil
|
||||
}
|
||||
|
||||
gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = cs.SetGenesis(gb.Blocks()[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startEpoch := abi.ChainEpoch(1)
|
||||
if cctx.IsSet("start-at") {
|
||||
startEpoch = abi.ChainEpoch(cctx.Int64("start-at"))
|
||||
start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = cs.SetHead(start)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if h := cctx.Int64("height"); h != 0 {
|
||||
tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head = tsh
|
||||
}
|
||||
|
||||
ts := head
|
||||
tschain := []*types.TipSet{ts}
|
||||
for ts.Height() > startEpoch {
|
||||
next, err := cs.LoadTipSet(ts.Parents())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tschain = append(tschain, next)
|
||||
ts = next
|
||||
}
|
||||
|
||||
var enc *json.Encoder
|
||||
if cctx.Bool("export-traces") {
|
||||
ibj, err := os.Create("import-bench.json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ibj.Close() //nolint:errcheck
|
||||
|
||||
enc = json.NewEncoder(ibj)
|
||||
}
|
||||
|
||||
for i := len(tschain) - 1; i >= 1; i-- {
|
||||
cur := tschain[i]
|
||||
for i := len(inverseChain) - 1; i >= 1; i-- {
|
||||
cur := inverseChain[i]
|
||||
start := time.Now()
|
||||
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
|
||||
st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
|
||||
@ -304,7 +508,7 @@ var importBenchCmd = &cli.Command{
|
||||
return xerrors.Errorf("failed to write out tipsetexec: %w", err)
|
||||
}
|
||||
}
|
||||
if tschain[i-1].ParentState() != st {
|
||||
if inverseChain[i-1].ParentState() != st {
|
||||
stripCallers(tse.Trace)
|
||||
lastTrace := tse.Trace
|
||||
d, err := json.MarshalIndent(lastTrace, "", " ")
|
||||
@ -320,23 +524,7 @@ var importBenchCmd = &cli.Command{
|
||||
|
||||
pprof.StopCPUProfile()
|
||||
|
||||
if true {
|
||||
resp, err := http.Get("http://localhost:6060/debug/metrics/prometheus")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metricsfi, err := os.Create("import-bench.metrics")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
io.Copy(metricsfi, resp.Body) //nolint:errcheck
|
||||
metricsfi.Close() //nolint:errcheck
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ import (
|
||||
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
@ -240,15 +241,6 @@ var sealBenchCmd = &cli.Command{
|
||||
}
|
||||
sectorSize := abi.SectorSize(sectorSizeInt)
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
|
||||
// Only fetch parameters if actually needed
|
||||
skipc2 := c.Bool("skip-commit2")
|
||||
if !skipc2 {
|
||||
@ -261,7 +253,7 @@ var sealBenchCmd = &cli.Command{
|
||||
Root: sbdir,
|
||||
}
|
||||
|
||||
sb, err := ffiwrapper.New(sbfs, cfg)
|
||||
sb, err := ffiwrapper.New(sbfs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -329,7 +321,7 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
if !skipc2 {
|
||||
log.Info("generating winning post candidates")
|
||||
wipt, err := spt.RegisteredWinningPoStProof()
|
||||
wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -509,11 +501,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
if numSectors%par.PreCommit1 != 0 {
|
||||
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
|
||||
}
|
||||
|
||||
for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ {
|
||||
sid := abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
|
||||
sid := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
},
|
||||
ProofType: spt(sectorSize),
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
@ -540,9 +534,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
start := worker * sectorsPerWorker
|
||||
end := start + sectorsPerWorker
|
||||
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
|
||||
sid := abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
ix := int(i - 1)
|
||||
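// sector numbers are 1-based here, so ix rebases them onto the
// zero-based sealedSectors slice used below.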
sid := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
},
|
||||
ProofType: spt(sectorSize),
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
@ -570,8 +568,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
precommit2 := time.Now()
|
||||
<-preCommit2Sema
|
||||
|
||||
sealedSectors[i] = saproof2.SectorInfo{
|
||||
SealProof: sb.SealProofType(),
|
||||
sealedSectors[ix] = saproof2.SectorInfo{
|
||||
SealProof: sid.ProofType,
|
||||
SectorNumber: i,
|
||||
SealedCID: cids.Sealed,
|
||||
}
|
||||
@ -625,7 +623,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
svi := saproof2.SealVerifyInfo{
|
||||
SectorID: sid,
|
||||
SealedCID: cids.Sealed,
|
||||
SealProof: sb.SealProofType(),
|
||||
SealProof: sid.ProofType,
|
||||
Proof: proof,
|
||||
DealIDs: nil,
|
||||
Randomness: ticket,
|
||||
@ -742,24 +740,25 @@ var proveCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize))
|
||||
sb, err := ffiwrapper.New(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
start := time.Now()
|
||||
|
||||
sb, err := ffiwrapper.New(nil, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
ref := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(c2in.SectorNum),
|
||||
},
|
||||
ProofType: spt(abi.SectorSize(c2in.SectorSize)),
|
||||
}
|
||||
|
||||
fmt.Printf("----\nstart proof computation\n")
|
||||
start := time.Now()
|
||||
|
||||
proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out)
|
||||
proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -783,3 +782,12 @@ func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string {
|
||||
bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
|
||||
return types.SizeStr(types.BigInt{Int: bps}) + "/s"
|
||||
}
|
||||
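// Rough worked example (assuming bdata was pre-multiplied by
// time.Second.Nanoseconds(), so dividing by d.Nanoseconds() yields bytes
// per second): sealing one 32 GiB sector in 6 hours gives
// 32 GiB / 21600 s ≈ 1.5 MiB/s.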
|
||||
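// spt maps a sector size to its seal proof type at the newest network
// version; it panics on unsupported sizes, which callers treat as a fatal
// configuration error.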
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
|
||||
spt, err := miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return spt
|
||||
}
|
||||
|
@ -278,6 +278,7 @@ func startNodes(
|
||||
// Start mining blocks
|
||||
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
|
||||
bm.MineBlocks()
|
||||
t.Cleanup(bm.Stop)
|
||||
|
||||
return &testNodes{lite: lite, full: full, miner: miner, closer: closer}
|
||||
}
|
||||
|
@ -42,7 +42,7 @@ var infoCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return xerrors.Errorf("checking worker status: %w", err)
|
||||
}
|
||||
fmt.Printf("Enabled: %t", enabled)
|
||||
fmt.Printf("Enabled: %t\n", enabled)
|
||||
|
||||
info, err := api.Info(ctx)
|
||||
if err != nil {
|
||||
@ -64,7 +64,6 @@ var infoCmd = &cli.Command{
|
||||
fmt.Printf("%s:\n", path.ID)
|
||||
fmt.Printf("\tWeight: %d; Use: ", path.Weight)
|
||||
if path.CanSeal || path.CanStore {
|
||||
fmt.Printf("Weight: %d; Use: ", path.Weight)
|
||||
if path.CanSeal {
|
||||
fmt.Print("Seal ")
|
||||
}
|
||||
|
@ -32,7 +32,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/lib/lotuslog"
|
||||
@ -356,11 +355,6 @@ var runCmd = &cli.Command{
|
||||
}
|
||||
|
||||
// Setup remote sector store
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting proof type: %w", err)
|
||||
}
|
||||
|
||||
sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not get api info: %w", err)
|
||||
@ -374,7 +368,6 @@ var runCmd = &cli.Command{
|
||||
|
||||
workerApi := &worker{
|
||||
LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
|
||||
SealProof: spt,
|
||||
TaskTypes: taskTypes,
|
||||
NoSwap: cctx.Bool("no-swap"),
|
||||
}, remote, localStore, nodeApi, nodeApi, wsts),
|
||||
|
@ -8,8 +8,6 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -19,6 +17,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
@ -128,12 +127,12 @@ var preSealCmd = &cli.Command{
|
||||
}
|
||||
sectorSize := abi.SectorSize(sectorSizeInt)
|
||||
|
||||
rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
|
||||
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
|
||||
gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
|
||||
@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
Root: sbroot,
|
||||
}
|
||||
|
||||
sb, err := ffiwrapper.New(sbfs, cfg)
|
||||
sb, err := ffiwrapper.New(sbfs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
var sealedSectors []*genesis.PreSeal
|
||||
for i := 0; i < sectors; i++ {
|
||||
sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next}
|
||||
ref := storage.SectorRef{ID: sid, ProofType: spt}
|
||||
next++
|
||||
|
||||
var preseal *genesis.PreSeal
|
||||
if !fakeSectors {
|
||||
preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage)
|
||||
preseal, err = presealSector(sb, sbfs, ref, ssize, preimage)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
preseal, err = presealSectorFake(sbfs, sid, spt, ssize)
|
||||
preseal, err = presealSectorFake(sbfs, ref, ssize)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -148,7 +146,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
return miner, &minerAddr.KeyInfo, nil
|
||||
}
|
||||
|
||||
func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
|
||||
func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
|
||||
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
|
||||
return &genesis.PreSeal{
|
||||
CommR: cids.Sealed,
|
||||
CommD: cids.Unsealed,
|
||||
SectorID: sid.Number,
|
||||
ProofType: spt,
|
||||
SectorID: sid.ID.Number,
|
||||
ProofType: sid.ProofType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
|
||||
func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
|
||||
@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
|
||||
return nil, xerrors.Errorf("mkdir cache: %w", err)
|
||||
}
|
||||
|
||||
commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed)
|
||||
commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fauxrep: %w", err)
|
||||
}
|
||||
@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
|
||||
return &genesis.PreSeal{
|
||||
CommR: commr,
|
||||
CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()),
|
||||
SectorID: sid.Number,
|
||||
ProofType: spt,
|
||||
SectorID: sid.ID.Number,
|
||||
ProofType: sid.ProofType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/gen/genesis"
|
||||
@ -10,6 +11,7 @@ import (
|
||||
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
@ -24,6 +26,7 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
@ -33,7 +36,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -168,19 +170,26 @@ var chainBalanceStateCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lkrepo.Datastore("/chain")
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
|
||||
cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cst := cbor.NewCborStore(bs)
|
||||
store := adt.WrapStore(ctx, cst)
|
||||
@ -382,19 +391,26 @@ var chainPledgeCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lkrepo.Datastore("/chain")
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
|
||||
cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cst := cbor.NewCborStore(bs)
|
||||
store := adt.WrapStore(ctx, cst)
|
||||
|
48
cmd/lotus-shed/cid.go
Normal file
@ -0,0 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
var cidCmd = &cli.Command{
|
||||
Name: "cid",
|
||||
Subcommands: cli.Commands{
|
||||
cidIdCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var cidIdCmd = &cli.Command{
|
||||
Name: "id",
|
||||
Usage: "create identity CID from hex or base64 data",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must specify data")
|
||||
}
|
||||
|
||||
dec, err := hex.DecodeString(cctx.Args().First())
|
||||
if err != nil {
|
||||
dec, err = base64.StdEncoding.DecodeString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
builder := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY}
|
||||
|
||||
c, err := builder.Sum(dec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(c)
|
||||
return nil
|
||||
},
|
||||
}
|
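// Example (hypothetical input bytes): lotus-shed cid id 0401 hex-decodes
// the argument (falling back to base64 if hex decoding fails) and prints
// a CIDv1 with the raw codec and an identity multihash wrapping those bytes.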
@ -8,10 +8,10 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
badgerds "github.com/ipfs/go-ds-badger2"
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/polydawn/refmt/cbor"
|
||||
@ -312,30 +312,41 @@ var datastoreRewriteCmd = &cli.Command{
|
||||
return xerrors.Errorf("cannot get toPath: %w", err)
|
||||
}
|
||||
|
||||
opts := repo.ChainBadgerOptions()
|
||||
opts.Options = opts.Options.WithSyncWrites(false)
|
||||
to, err := badgerds.NewDatastore(toPath, &opts)
|
||||
var (
|
||||
from *badger.DB
|
||||
to *badger.DB
|
||||
)
|
||||
|
||||
// open the destination (to) store.
|
||||
opts, err := repo.BadgerBlockstoreOptions(repo.BlockstoreChain, toPath, false)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opennig 'to' datastore: %w", err)
|
||||
return xerrors.Errorf("failed to get badger options: %w", err)
|
||||
}
|
||||
opts.SyncWrites = false
|
||||
if to, err = badger.Open(opts.Options); err != nil {
|
||||
return xerrors.Errorf("opening 'to' badger store: %w", err)
|
||||
}
|
||||
|
||||
opts.Options = opts.Options.WithReadOnly(false)
|
||||
from, err := badgerds.NewDatastore(fromPath, &opts)
|
||||
// open the source (from) store.
|
||||
opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, fromPath, true)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opennig 'from' datastore: %w", err)
|
||||
return xerrors.Errorf("failed to get badger options: %w", err)
|
||||
}
|
||||
if from, err = badger.Open(opts.Options); err != nil {
|
||||
return xerrors.Errorf("opening 'from' datastore: %w", err)
|
||||
}
|
||||
|
||||
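// Stream a full badger backup from the source store straight into the
// destination store through an in-memory pipe, so the rewrite never
// materializes an intermediate backup file on disk.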
pr, pw := io.Pipe()
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
bw := bufio.NewWriterSize(pw, 64<<20)
|
||||
_, err := from.DB.Backup(bw, 0)
|
||||
_, err := from.Backup(bw, 0)
|
||||
_ = bw.Flush()
|
||||
_ = pw.CloseWithError(err)
|
||||
errCh <- err
|
||||
}()
|
||||
go func() {
|
||||
err := to.DB.Load(pr, 256)
|
||||
err := to.Load(pr, 256)
|
||||
errCh <- err
|
||||
}()
|
||||
|
||||
|
@ -3,16 +3,17 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -71,19 +72,27 @@ var exportChainCmd = &cli.Command{
|
||||
|
||||
defer fi.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lr.Datastore("/chain")
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
cs := store.NewChainStore(bs, bs, mds, nil, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cs := store.NewChainStore(bs, mds, nil, nil)
|
||||
if err := cs.Load(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -52,7 +52,8 @@ var genesisVerifyCmd = &cli.Command{
|
||||
}
|
||||
bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
|
||||
|
||||
cs := store.NewChainStore(bs, datastore.NewMapDatastore(), nil, nil)
|
||||
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cf := cctx.Args().Get(0)
|
||||
f, err := os.Open(cf)
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -45,12 +44,18 @@ var importCarCmd = &cli.Command{
|
||||
return xerrors.Errorf("opening the car file: %w", err)
|
||||
}
|
||||
|
||||
ds, err := lr.Datastore("/chain")
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
cr, err := car.NewCarReader(f)
|
||||
if err != nil {
|
||||
@ -65,7 +70,7 @@ var importCarCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
return ds.Close()
|
||||
return nil
|
||||
default:
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
@ -108,12 +113,18 @@ var importObjectCmd = &cli.Command{
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lr.Datastore("/chain")
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
c, err := cid.Decode(cctx.Args().Get(0))
|
||||
if err != nil {
|
||||
|
@ -47,6 +47,8 @@ func main() {
|
||||
sectorsCmd,
|
||||
msgCmd,
|
||||
electionCmd,
|
||||
rpcCmd,
|
||||
cidCmd,
|
||||
}
|
||||
|
||||
app := &cli.App{
|
||||
|
@ -3,20 +3,19 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/bbloom"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/ipfs/bbloom"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/query"
|
||||
dshelp "github.com/ipfs/go-ipfs-ds-help"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type cidSet interface {
|
||||
@ -132,12 +131,25 @@ var stateTreePruneCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lkrepo.Datastore("/chain")
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer ds.Close() //nolint:errcheck
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// After migrating to native blockstores, this has been made
|
||||
// database-specific.
|
||||
badgbs, ok := bs.(*badgerbs.Blockstore)
|
||||
if !ok {
|
||||
return fmt.Errorf("only badger blockstores are supported")
|
||||
}
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
if err != nil {
|
||||
@ -145,24 +157,21 @@ var stateTreePruneCmd = &cli.Command{
|
||||
}
|
||||
defer mds.Close() //nolint:errcheck
|
||||
|
||||
const DiscardRatio = 0.2
|
||||
if cctx.Bool("only-ds-gc") {
|
||||
gcds, ok := ds.(datastore.GCDatastore)
|
||||
if ok {
|
||||
fmt.Println("running datastore gc....")
|
||||
for i := 0; i < cctx.Int("gc-count"); i++ {
|
||||
if err := gcds.CollectGarbage(); err != nil {
|
||||
return xerrors.Errorf("datastore GC failed: %w", err)
|
||||
}
|
||||
fmt.Println("running datastore gc....")
|
||||
for i := 0; i < cctx.Int("gc-count"); i++ {
|
||||
if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil {
|
||||
return xerrors.Errorf("datastore GC failed: %w", err)
|
||||
}
|
||||
fmt.Println("gc complete!")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("datastore doesnt support gc")
|
||||
fmt.Println("gc complete!")
|
||||
return nil
|
||||
}
|
||||
|
||||
bs := blockstore.NewBlockstore(ds)
|
||||
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
|
||||
if err := cs.Load(); err != nil {
|
||||
return fmt.Errorf("loading chainstore: %w", err)
|
||||
}
|
||||
@ -199,63 +208,30 @@ var stateTreePruneCmd = &cli.Command{
|
||||
return nil
|
||||
}
|
||||
|
||||
var b datastore.Batch
|
||||
var batchCount int
|
||||
b := badgbs.DB.NewWriteBatch()
|
||||
defer b.Cancel()
|
||||
|
||||
markForRemoval := func(c cid.Cid) error {
|
||||
if b == nil {
|
||||
nb, err := ds.Batch()
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening batch: %w", err)
|
||||
}
|
||||
|
||||
b = nb
|
||||
}
|
||||
batchCount++
|
||||
|
||||
if err := b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if batchCount > 100 {
|
||||
if err := b.Commit(); err != nil {
|
||||
return xerrors.Errorf("failed to commit batch deletes: %w", err)
|
||||
}
|
||||
b = nil
|
||||
batchCount = 0
|
||||
}
|
||||
return nil
|
||||
return b.Delete(badgbs.StorageKey(nil, c))
|
||||
}
|
||||
|
||||
res, err := ds.Query(query.Query{KeysOnly: true})
|
||||
keys, err := bs.AllKeysChan(context.Background())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to query datastore: %w", err)
|
||||
return xerrors.Errorf("failed to query blockstore: %w", err)
|
||||
}
|
||||
|
||||
dupTo := cctx.Int("delete-up-to")
|
||||
|
||||
var deleteCount int
|
||||
var goodHits int
|
||||
for {
|
||||
v, ok := res.NextSync()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):]))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse key: %w", err)
|
||||
}
|
||||
|
||||
if goodSet.HasRaw(bk) {
|
||||
for k := range keys {
|
||||
if goodSet.HasRaw(k.Bytes()) {
|
||||
goodHits++
|
||||
continue
|
||||
}
|
||||
|
||||
nc := cid.NewCidV1(cid.Raw, bk)
|
||||
|
||||
deleteCount++
|
||||
if err := markForRemoval(nc); err != nil {
|
||||
return fmt.Errorf("failed to remove cid %s: %w", nc, err)
|
||||
if err := markForRemoval(k); err != nil {
|
||||
return fmt.Errorf("failed to remove cid %s: %w", k, err)
|
||||
}
|
||||
|
||||
if deleteCount%20 == 0 {
|
||||
@ -267,22 +243,17 @@ var stateTreePruneCmd = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
if b != nil {
|
||||
if err := b.Commit(); err != nil {
|
||||
return xerrors.Errorf("failed to commit final batch delete: %w", err)
|
||||
}
|
||||
if err := b.Flush(); err != nil {
|
||||
return xerrors.Errorf("failed to flush final batch delete: %w", err)
|
||||
}
|
||||
|
||||
gcds, ok := ds.(datastore.GCDatastore)
|
||||
if ok {
|
||||
fmt.Println("running datastore gc....")
|
||||
for i := 0; i < cctx.Int("gc-count"); i++ {
|
||||
if err := gcds.CollectGarbage(); err != nil {
|
||||
return xerrors.Errorf("datastore GC failed: %w", err)
|
||||
}
|
||||
fmt.Println("running datastore gc....")
|
||||
for i := 0; i < cctx.Int("gc-count"); i++ {
|
||||
if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil {
|
||||
return xerrors.Errorf("datastore GC failed: %w", err)
|
||||
}
|
||||
fmt.Println("gc complete!")
|
||||
}
|
||||
fmt.Println("gc complete!")
|
||||
|
||||
return nil
|
||||
},
|
||||
|
Some files were not shown because too many files have changed in this diff.