diff --git a/.circleci/config.yml b/.circleci/config.yml index 0364e99fa..443207945 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ orbs: executors: golang: docker: - - image: circleci/golang:1.14.6 + - image: circleci/golang:1.15.5 resource_class: 2xlarge ubuntu: docker: @@ -276,7 +276,7 @@ jobs: - run: cd extern/filecoin-ffi && make - run: name: "replace lotus, filecoin-ffi, blst and fil-blst deps" - command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst + command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../blst - run: name: "build lotus-soup testplan" command: pushd extern/oni/lotus-soup && go build -tags=testground . 
@@ -294,8 +294,8 @@ jobs: - run: name: Install go command: | - curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \ - sudo installer -pkg go1.14.2.darwin-amd64.pkg -target / + curl -O https://dl.google.com/go/go1.15.5.darwin-amd64.pkg && \ + sudo installer -pkg go1.15.5.darwin-amd64.pkg -target / - run: name: Install pkg-config command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config @@ -359,6 +359,7 @@ jobs: steps: - install-deps - prepare + - run: make deps - run: make docsgen - run: git --no-pager diff - run: git --no-pager diff --quiet diff --git a/.gitmodules b/.gitmodules index 35f5a3d3f..5d82758a2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +1,15 @@ [submodule "extern/filecoin-ffi"] path = extern/filecoin-ffi url = https://github.com/filecoin-project/filecoin-ffi.git - branch = master [submodule "extern/serialization-vectors"] path = extern/serialization-vectors - url = https://github.com/filecoin-project/serialization-vectors + url = https://github.com/filecoin-project/serialization-vectors.git [submodule "extern/test-vectors"] path = extern/test-vectors url = https://github.com/filecoin-project/test-vectors.git -[submodule "extern/fil-blst"] - path = extern/fil-blst - url = https://github.com/filecoin-project/fil-blst.git [submodule "extern/oni"] path = extern/oni - url = https://github.com/filecoin-project/oni + url = https://github.com/filecoin-project/oni.git +[submodule "extern/blst"] + path = extern/blst + url = https://github.com/supranational/blst.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 88a30c91d..f290faf83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,171 @@ # Lotus changelog +# 1.2.0 / 2020-11-18 + +This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15. 
+ +The changes that break consensus are: + +- Upgrading to specs-actors 2.3.2 (https://github.com/filecoin-project/specs-actors/releases/tag/v2.3.2) +- Introducing proofs v5.4.0 (https://github.com/filecoin-project/rust-fil-proofs/releases/tag/storage-proofs-v5.4.0), and switching between the proof types (https://github.com/filecoin-project/lotus/pull/4873) +- Don't use terminated sectors for winning PoSt (https://github.com/filecoin-project/lotus/pull/4770) +- Various small VM-level edge-case handling (https://github.com/filecoin-project/lotus/pull/4783) +- Correction of the VM circulating supply calculation (https://github.com/filecoin-project/lotus/pull/4862) +- Retuning gas costs (https://github.com/filecoin-project/lotus/pull/4830) +- Avoid sending messages to the zero BLS address (https://github.com/filecoin-project/lotus/pull/4888) + +## Other Changes + +- delayed pubsub subscribe for messages topic (https://github.com/filecoin-project/lotus/pull/3646) +- add chain base64 decode params (https://github.com/filecoin-project/lotus/pull/4748) +- chore(dep): update bitswap to fix an initialization race that could panic (https://github.com/filecoin-project/lotus/pull/4855) +- Chore/blockstore nits (https://github.com/filecoin-project/lotus/pull/4813) +- Print Consensus Faults in miner info (https://github.com/filecoin-project/lotus/pull/4853) +- Truncate genesis file before generating (https://github.com/filecoin-project/lotus/pull/4851) +- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824) +- Fix init actor address map diffing (https://github.com/filecoin-project/lotus/pull/4875) +- Bump API versions to 1.0.0 (https://github.com/filecoin-project/lotus/pull/4884) +- Fix cid recording issue (https://github.com/filecoin-project/lotus/pull/4874) +- Speed up worker key retrieval (https://github.com/filecoin-project/lotus/pull/4885) +- Add error codes to worker return (https://github.com/filecoin-project/lotus/pull/4890) +- Update go to 
1.15.5 (https://github.com/filecoin-project/lotus/pull/4896) +- Fix MaxSealingSectrosForDeals getting reset to 0 (https://github.com/filecoin-project/lotus/pull/4879) +- add sanity check for maximum block size (https://github.com/filecoin-project/lotus/pull/3171) +- Check (pre)commit receipt before other checks in failed states (https://github.com/filecoin-project/lotus/pull/4712) +- fix badger double open on daemon --import-snapshot; chainstore lifecycle (https://github.com/filecoin-project/lotus/pull/4872) +- Update to ipfs-blockstore 1.0.3 (https://github.com/filecoin-project/lotus/pull/4897) +- break loop when found warm up sector (https://github.com/filecoin-project/lotus/pull/4869) +- Tweak handling of bad beneficaries in DeleteActor (https://github.com/filecoin-project/lotus/pull/4903) +- cap maximum number of messages per block in selection (https://github.com/filecoin-project/lotus/pull/4905) +- Set Calico epoch (https://github.com/filecoin-project/lotus/pull/4889) + +# 1.1.3 / 2020-11-13 + +This is an optional release of Lotus that upgrades Lotus dependencies, and includes many performance enhancements, bugfixes, and UX improvements. 
+ +## Highlights + +- Refactored much of the miner code (https://github.com/filecoin-project/lotus/pull/3618), improving its recovery from restarts and overall sector success rate +- Updated [proofs](https://github.com/filecoin-project/rust-fil-proofs) to v5.3.0, which brings significant performance improvements +- Updated [markets](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.0.4) to v1.0.4, which reduces failures due to reorgs (https://github.com/filecoin-project/lotus/pull/4730) and uses the newly refactored fund manager (https://github.com/filecoin-project/lotus/pull/4736) + +## Changes + +#### Core Lotus + +- polish: add Equals method to MinerInfo shim (https://github.com/filecoin-project/lotus/pull/4604) +- Fix messagepool accounting (https://github.com/filecoin-project/lotus/pull/4668) +- Prep for gas balancing (https://github.com/filecoin-project/lotus/pull/4651) +- Reduce badger ValueThreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629) +- Config for default max gas fee (https://github.com/filecoin-project/lotus/pull/4652) +- bootstrap: don't return early when one drand resolution fails (https://github.com/filecoin-project/lotus/pull/4626) +- polish: add ClaimsChanged and DiffClaims method to power shim (https://github.com/filecoin-project/lotus/pull/4628) +- Simplify chain event Called API (https://github.com/filecoin-project/lotus/pull/4664) +- Cache deal states for most recent old/new tipset (https://github.com/filecoin-project/lotus/pull/4623) +- Add miner available balance and power info to state miner info (https://github.com/filecoin-project/lotus/pull/4618) +- Call GetHeaviestTipSet() only once when syncing (https://github.com/filecoin-project/lotus/pull/4696) +- modify runtime gasUsed printf (https://github.com/filecoin-project/lotus/pull/4704) +- Rename builtin actor generators (https://github.com/filecoin-project/lotus/pull/4697) +- Move gas multiplier as property of pricelist 
(https://github.com/filecoin-project/lotus/pull/4728) +- polish: add msig pendingtxn diffing and comp (https://github.com/filecoin-project/lotus/pull/4719) +- Optional chain Bitswap (https://github.com/filecoin-project/lotus/pull/4717) +- rewrite sync manager (https://github.com/filecoin-project/lotus/pull/4599) +- async connect to bootstrappers (https://github.com/filecoin-project/lotus/pull/4785) +- head change coalescer (https://github.com/filecoin-project/lotus/pull/4688) +- move to native badger blockstore; leverage zero-copy View() to deserialize in-place (https://github.com/filecoin-project/lotus/pull/4681) +- badger blockstore: minor improvements (https://github.com/filecoin-project/lotus/pull/4811) +- Do not fail wallet delete because of pre-existing trashed key (https://github.com/filecoin-project/lotus/pull/4589) +- Correctly delete the default wallet address (https://github.com/filecoin-project/lotus/pull/4705) +- Reduce badger ValueTreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629) +- predicates: Fast StateGetActor wrapper (https://github.com/filecoin-project/lotus/pull/4835) + +#### Mining + +- worker key should change when set sender found key not equal with the value on chain (https://github.com/filecoin-project/lotus/pull/4595) +- extern/sector-storage: fix GPU usage overwrite bug (https://github.com/filecoin-project/lotus/pull/4627) +- sectorstorage: Fix manager restart edge-case (https://github.com/filecoin-project/lotus/pull/4645) +- storagefsm: Fix GetTicket loop when the sector is already precommitted (https://github.com/filecoin-project/lotus/pull/4643) +- Debug flag to force running sealing scheduler (https://github.com/filecoin-project/lotus/pull/4662) +- Fix worker reenabling, handle multiple restarts in worker (https://github.com/filecoin-project/lotus/pull/4666) +- keep retrying the proof until we run out of sectors to skip (https://github.com/filecoin-project/lotus/pull/4633) +- worker: Commands to pause/resume task 
processing (https://github.com/filecoin-project/lotus/pull/4615) +- struct name incorrect (https://github.com/filecoin-project/lotus/pull/4699) +- optimize code replace strings with constants (https://github.com/filecoin-project/lotus/pull/4769) +- optimize pledge sector (https://github.com/filecoin-project/lotus/pull/4765) +- Track sealing processes across lotus-miner restarts (https://github.com/filecoin-project/lotus/pull/3618) +- Fix scheduler lockups after storage is freed (https://github.com/filecoin-project/lotus/pull/4778) +- storage: Track worker hostnames with work (https://github.com/filecoin-project/lotus/pull/4779) +- Expand sched-diag; Command to abort sealing calls (https://github.com/filecoin-project/lotus/pull/4804) +- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824) +- docsgen: Support miner/worker (https://github.com/filecoin-project/lotus/pull/4817) +- miner: Basic storage cleanup command (https://github.com/filecoin-project/lotus/pull/4834) + +#### Markets and Data Transfer + +- Flesh out data transfer features (https://github.com/filecoin-project/lotus/pull/4572) +- Fix memory leaks in data transfer (https://github.com/filecoin-project/lotus/pull/4619) +- Handle deal id changes in OnDealSectorCommitted (https://github.com/filecoin-project/lotus/pull/4730) +- Refactor FundManager (https://github.com/filecoin-project/lotus/pull/4736) +- refactor: integrate new FundManager (https://github.com/filecoin-project/lotus/pull/4787) +- Fix race in paych manager when req context is cancelled (https://github.com/filecoin-project/lotus/pull/4803) +- fix race in paych manager add funds (https://github.com/filecoin-project/lotus/pull/4597) +- Fix panic in FundManager (https://github.com/filecoin-project/lotus/pull/4808) +- Fix: dont crash on startup if funds migration fails (https://github.com/filecoin-project/lotus/pull/4827) + +#### UX + +- Make EarlyExpiration in sectors list less scary 
(https://github.com/filecoin-project/lotus/pull/4600) +- Add commands to change the worker key (https://github.com/filecoin-project/lotus/pull/4513) +- Expose ClientDealSize via CLI (https://github.com/filecoin-project/lotus/pull/4569) +- client deal: Cache CommD when creating multiple deals (https://github.com/filecoin-project/lotus/pull/4535) +- miner sectors list: flags for events/seal time (https://github.com/filecoin-project/lotus/pull/4649) +- make IPFS online mode configurable (https://github.com/filecoin-project/lotus/pull/4650) +- Add sync status to miner info command (https://github.com/filecoin-project/lotus/pull/4669) +- Add a StateDecodeParams method (https://github.com/filecoin-project/lotus/pull/4105) +- sched: Interactive RPC Shell (https://github.com/filecoin-project/lotus/pull/4692) +- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210) +- Update lotus-stats with a richer cli (https://github.com/filecoin-project/lotus/pull/4718) +- Use TSK passed to GasEstimateGasLimit (https://github.com/filecoin-project/lotus/pull/4739) +- match data type for reward state api (https://github.com/filecoin-project/lotus/pull/4745) +- Add `termination-estimate` to get an estimation for how much a termination penalty will be (https://github.com/filecoin-project/lotus/pull/4617) +- Restrict `ParseFIL` input length (https://github.com/filecoin-project/lotus/pull/4780) +- cmd sectors commitIDs len debug (https://github.com/filecoin-project/lotus/pull/4786) +- Add client deal-stats CLI (https://github.com/filecoin-project/lotus/pull/4788) +- Modify printf format (https://github.com/filecoin-project/lotus/pull/4795) +- Updated msig inspect (https://github.com/filecoin-project/lotus/pull/4533) +- Delete the duplicate output (https://github.com/filecoin-project/lotus/pull/4819) +- miner: Storage list sectors command (https://github.com/filecoin-project/lotus/pull/4831) +- drop a few logs down to debug 
(https://github.com/filecoin-project/lotus/pull/4832) + +#### Testing and Tooling + +- refactor: share code between CLI tests (https://github.com/filecoin-project/lotus/pull/4598) +- Fix flaky TestCLIDealFlow (https://github.com/filecoin-project/lotus/pull/4608) +- Fix flaky testMiningReal (https://github.com/filecoin-project/lotus/pull/4609) +- Add election run-dummy command (https://github.com/filecoin-project/lotus/pull/4498) +- Fix .gitmodules (https://github.com/filecoin-project/lotus/pull/4713) +- fix metrics wiring.(https://github.com/filecoin-project/lotus/pull/4691) +- shed: Util for creating ID CIDs (https://github.com/filecoin-project/lotus/pull/4726) +- Run kumquat upgrade on devnets (https://github.com/filecoin-project/lotus/pull/4734) +- Make pond work again (https://github.com/filecoin-project/lotus/pull/4775) +- lotus-stats: fix influx flags (https://github.com/filecoin-project/lotus/pull/4810) +- 2k sync BootstrapPeerThreshold (https://github.com/filecoin-project/lotus/pull/4797) +- test for FundManager panic to ensure it is fixed (https://github.com/filecoin-project/lotus/pull/4825) +- Stop mining at the end of tests (https://github.com/filecoin-project/lotus/pull/4826) +- Make some logs quieter (https://github.com/filecoin-project/lotus/pull/4709) + +#### Dependencies + +- update filecoin-ffi in go mod (https://github.com/filecoin-project/lotus/pull/4584) +- Update FFI (https://github.com/filecoin-project/lotus/pull/4613) +- feat: integrate new optional blst backend and verification optimizations from proofs (https://github.com/filecoin-project/lotus/pull/4630) +- Use https for blst submodule (https://github.com/filecoin-project/lotus/pull/4710) +- Update go-bitfield (https://github.com/filecoin-project/lotus/pull/4756) +- Update Yamux (https://github.com/filecoin-project/lotus/pull/4758) +- Update to latest go-bitfield (https://github.com/filecoin-project/lotus/pull/4793) +- Update to latest go-address 
(https://github.com/filecoin-project/lotus/pull/4798) +- update libp2p for stream interface changes (https://github.com/filecoin-project/lotus/pull/4814) + # 1.1.2 / 2020-10-24 This is a patch release of Lotus that builds on the fixes involving worker keys that was introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated. diff --git a/Makefile b/Makefile index 093f62ef6..9c77aa8ee 100644 --- a/Makefile +++ b/Makefile @@ -5,10 +5,10 @@ all: build unexport GOFLAGS -GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2) -ifeq ($(shell expr $(GOVERSION) \< 14), 1) -$(warning Your Golang version is go 1.$(GOVERSION)) -$(error Update Golang to version $(shell grep '^go' go.mod)) +GOVERSION:=$(shell go version | cut -d' ' -f 3 | awk -F. '{printf "%d%03d", $$2, $$3}') +ifeq ($(shell expr $(GOVERSION) \< 15005), 1) +$(warning Your Golang version is go 1.$(shell expr $(GOVERSION) / 1000).$(shell expr $(GOVERSION) % 1000)) +$(error Update Golang to version to at least 1.15.5) endif # git modules that need to be loaded @@ -179,7 +179,7 @@ BINS+=lotus-bench lotus-stats: rm -f lotus-stats - go build -o lotus-stats ./cmd/lotus-stats + go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build .PHONY: lotus-stats BINS+=lotus-stats @@ -304,7 +304,9 @@ method-gen: gen: type-gen method-gen docsgen: - go run ./api/docgen > documentation/en/api-methods.md + go run ./api/docgen "api/api_full.go" "FullNode" > documentation/en/api-methods.md + go run ./api/docgen "api/api_storage.go" "StorageMiner" > documentation/en/api-methods-miner.md + go run ./api/docgen "api/api_worker.go" "WorkerAPI" > documentation/en/api-methods-worker.md print-%: @echo $*=$($*) diff --git a/api/api_full.go b/api/api_full.go index 0b9fb71d2..d5e97d3a6 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -514,8 +514,10 @@ type 
FullNode interface { // along with the address removal. MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) - MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) - // MarketFreeBalance + // MarketReserveFunds reserves funds for a deal + MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) + // MarketReleaseFunds releases funds reserved by MarketReserveFunds + MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error // MethodGroup: Paych // The Paych methods are for interacting with and managing payment channels @@ -789,8 +791,9 @@ type IpldObject struct { } type ActiveSync struct { - Base *types.TipSet - Target *types.TipSet + WorkerID uint64 + Base *types.TipSet + Target *types.TipSet Stage SyncStateStage Height abi.ChainEpoch diff --git a/api/api_storage.go b/api/api_storage.go index d003ec776..738a05e09 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -71,6 +71,7 @@ type StorageMiner interface { // SealingSchedDiag dumps internal sealing scheduler state SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) + SealingAbort(ctx context.Context, call storiface.CallID) error stores.SectorIndex diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index 244d309a0..214f56422 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -241,7 +241,8 @@ type FullNodeStruct struct { MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"` - MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, 
error) `perm:"sign"` + MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` + MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"` PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"` @@ -310,19 +311,20 @@ type StorageMinerStruct struct { WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"` WorkerJobs func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"` - ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"` - ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"` - ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"` - ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"` - ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"` - ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error 
`perm:"admin" retry:"true"` - ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` + ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error `perm:"admin" retry:"true"` + ReturnFetch func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"` - SealingSchedDiag func(context.Context, bool) (interface{}, error) `perm:"admin"` + SealingSchedDiag func(context.Context, bool) (interface{}, error) `perm:"admin"` + SealingAbort func(ctx context.Context, call storiface.CallID) error `perm:"admin"` StorageList func(context.Context) 
(map[stores.ID][]stores.Decl, error) `perm:"admin"` StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` @@ -371,17 +373,17 @@ type WorkerStruct struct { Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` - AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"` - SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` - SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"` - SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"` - SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` - FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"` - ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"` - MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` - UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"` - ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"` - Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) 
(storiface.CallID, error) `perm:"admin"` + AddPiece func(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"` + SealPreCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + SealPreCommit2 func(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"` + SealCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"` + SealCommit2 func(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` + FinalizeSector func(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"` + ReleaseUnsealed func(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"` + MoveStorage func(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` + UnsealPiece func(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"` + ReadPiece func(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"` + Fetch func(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"` Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` @@ -1117,8 +1119,12 @@ func (c *FullNodeStruct) 
MsigRemoveSigner(ctx context.Context, msig address.Addr return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) } -func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt) +func (c *FullNodeStruct) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { + return c.Internal.MarketReserveFunds(ctx, wallet, addr, amt) +} + +func (c *FullNodeStruct) MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error { + return c.Internal.MarketReleaseFunds(ctx, addr, amt) } func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) { @@ -1265,47 +1271,47 @@ func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]st return c.Internal.WorkerJobs(ctx) } -func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { +func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { return c.Internal.ReturnAddPiece(ctx, callID, pi, err) } -func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { +func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err) } -func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { +func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { return 
c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err) } -func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { +func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { return c.Internal.ReturnSealCommit1(ctx, callID, out, err) } -func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { +func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { return c.Internal.ReturnSealCommit2(ctx, callID, proof, err) } -func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { +func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return c.Internal.ReturnFinalizeSector(ctx, callID, err) } -func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { +func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return c.Internal.ReturnReleaseUnsealed(ctx, callID, err) } -func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { +func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return c.Internal.ReturnMoveStorage(ctx, callID, err) } -func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { +func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return c.Internal.ReturnUnsealPiece(ctx, callID, err) } -func (c 
*StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { +func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { return c.Internal.ReturnReadPiece(ctx, callID, ok, err) } -func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { +func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return c.Internal.ReturnFetch(ctx, callID, err) } @@ -1313,6 +1319,10 @@ func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context, doSched bool) return c.Internal.SealingSchedDiag(ctx, doSched) } +func (c *StorageMinerStruct) SealingAbort(ctx context.Context, call storiface.CallID) error { + return c.Internal.SealingAbort(ctx, call) +} + func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error { return c.Internal.StorageAttach(ctx, si, st) } @@ -1503,47 +1513,47 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) { return w.Internal.Info(ctx) } -func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (w *WorkerStruct) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) } -func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return 
w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) } -func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { +func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { return w.Internal.SealPreCommit2(ctx, sector, pc1o) } -func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { +func (w *WorkerStruct) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids) } -func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { +func (w *WorkerStruct) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { return w.Internal.SealCommit2(ctx, sector, c1o) } -func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { +func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { return w.Internal.FinalizeSector(ctx, sector, keepUnsealed) } -func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { +func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree) } -func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types 
storiface.SectorFileType) (storiface.CallID, error) { +func (w *WorkerStruct) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { return w.Internal.MoveStorage(ctx, sector, types) } -func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) { +func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) { return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c) } -func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { +func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { return w.Internal.ReadPiece(ctx, sink, sector, offset, size) } -func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (w *WorkerStruct) Fetch(ctx context.Context, id storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { return w.Internal.Fetch(ctx, id, fileType, ptype, am) } diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index dc6004121..bc29cf91d 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -6,12 +6,14 @@ import ( "go/ast" "go/parser" "go/token" + "os" "reflect" "sort" "strings" "time" "unicode" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipfs/go-filestore" metrics "github.com/libp2p/go-libp2p-core/metrics" @@ 
-24,6 +26,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" datatransfer "github.com/filecoin-project/go-data-transfer" + filestore2 "github.com/filecoin-project/go-fil-markets/filestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-multistore" @@ -36,6 +39,10 @@ import ( "github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -83,7 +90,7 @@ func init() { addExample(&pid) addExample(bitfield.NewFromSet([]uint64{5})) - addExample(abi.RegisteredSealProof_StackedDrg32GiBV1) + addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) addExample(abi.ChainEpoch(10101)) addExample(crypto.SigTypeBLS) @@ -117,17 +124,17 @@ func init() { addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) addExample(&types.ExecutionTrace{ - Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message), - MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + Msg: exampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), + MsgRct: exampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), }) addExample(map[string]types.Actor{ - "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor), + "t01236": exampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), }) addExample(map[string]api.MarketDeal{ - "t026363": 
exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), + "t026363": exampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), }) addExample(map[string]api.MarketBalance{ - "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), + "t026363": exampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), }) addExample(map[string]*pubsub.TopicScoreSnapshot{ "/blocks": { @@ -162,9 +169,81 @@ func init() { // because reflect.TypeOf(maddr) returns the concrete type... ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr + // miner specific + addExample(filestore2.Path(".lotusminer/fstmp123")) + si := multistore.StoreID(12) + addExample(&si) + addExample(retrievalmarket.DealID(5)) + addExample(abi.ActorID(1000)) + addExample(map[string][]api.SealedRef{ + "98000": { + api.SealedRef{ + SectorID: 100, + Offset: 10 << 20, + Size: 1 << 20, + }, + }, + }) + addExample(api.SectorState(sealing.Proving)) + addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8")) + addExample(storiface.FTUnsealed) + addExample(storiface.PathSealing) + addExample(map[stores.ID][]stores.Decl{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": { + { + SectorID: abi.SectorID{Miner: 1000, Number: 100}, + SectorFileType: storiface.FTSealed, + }, + }, + }) + addExample(map[stores.ID]string{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path", + }) + addExample(map[uuid.UUID][]storiface.WorkerJob{ + uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { + { + ID: storiface.CallID{ + Sector: abi.SectorID{Miner: 1000, Number: 100}, + ID: uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"), + }, + Sector: abi.SectorID{Miner: 1000, Number: 100}, + Task: sealtasks.TTPreCommit2, + RunWait: 0, + Start: time.Unix(1605172927, 0).UTC(), + Hostname: "host", + }, + }, + }) + addExample(map[uuid.UUID]storiface.WorkerStats{ + uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { + Info: 
storiface.WorkerInfo{ + Hostname: "host", + Resources: storiface.WorkerResources{ + MemPhysical: 256 << 30, + MemSwap: 120 << 30, + MemReserved: 2 << 30, + CPUs: 64, + GPUs: []string{"aGPU 1337"}, + }, + }, + Enabled: true, + MemUsedMin: 0, + MemUsedMax: 0, + GpuUsed: false, + CpuUse: 0, + }, + }) + addExample(storiface.ErrorCode(0)) + + // worker specific + addExample(storiface.AcquireMove) + addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded())) + addExample(map[sealtasks.TaskType]struct{}{ + sealtasks.TTPreCommit2: {}, + }) } -func exampleValue(t, parent reflect.Type) interface{} { +func exampleValue(method string, t, parent reflect.Type) interface{} { v, ok := ExampleValues[t] if ok { return v @@ -173,25 +252,25 @@ func exampleValue(t, parent reflect.Type) interface{} { switch t.Kind() { case reflect.Slice: out := reflect.New(t).Elem() - reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t))) + reflect.Append(out, reflect.ValueOf(exampleValue(method, t.Elem(), t))) return out.Interface() case reflect.Chan: - return exampleValue(t.Elem(), nil) + return exampleValue(method, t.Elem(), nil) case reflect.Struct: - es := exampleStruct(t, parent) + es := exampleStruct(method, t, parent) v := reflect.ValueOf(es).Elem().Interface() ExampleValues[t] = v return v case reflect.Array: out := reflect.New(t).Elem() for i := 0; i < t.Len(); i++ { - out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t))) + out.Index(i).Set(reflect.ValueOf(exampleValue(method, t.Elem(), t))) } return out.Interface() case reflect.Ptr: if t.Elem().Kind() == reflect.Struct { - es := exampleStruct(t.Elem(), t) + es := exampleStruct(method, t.Elem(), t) //ExampleValues[t] = es return es } @@ -199,10 +278,10 @@ func exampleValue(t, parent reflect.Type) interface{} { return struct{}{} } - panic(fmt.Sprintf("No example value for type: %s", t)) + panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method)) } -func exampleStruct(t, parent 
reflect.Type) interface{} { +func exampleStruct(method string, t, parent reflect.Type) interface{} { ns := reflect.New(t) for i := 0; i < t.NumField(); i++ { f := t.Field(i) @@ -210,7 +289,7 @@ func exampleStruct(t, parent reflect.Type) interface{} { continue } if strings.Title(f.Name) == f.Name { - ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t))) + ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(method, f.Type, t))) } } @@ -218,6 +297,7 @@ func exampleStruct(t, parent reflect.Type) interface{} { } type Visitor struct { + Root string Methods map[string]ast.Node } @@ -227,7 +307,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor { return v } - if st.Name.Name != "FullNode" { + if st.Name.Name != v.Root { return nil } @@ -243,7 +323,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor { const noComment = "There are not yet any comments for this method." -func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint +func parseApiASTInfo(apiFile, iface string) (map[string]string, map[string]string) { //nolint:golint fset := token.NewFileSet() pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments) if err != nil { @@ -252,11 +332,11 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint ap := pkgs["api"] - f := ap.Files["api/api_full.go"] + f := ap.Files[apiFile] cmap := ast.NewCommentMap(fset, f, f.Comments) - v := &Visitor{make(map[string]ast.Node)} + v := &Visitor{iface, make(map[string]ast.Node)} ast.Walk(v, pkgs["api"]) groupDocs := make(map[string]string) @@ -312,13 +392,30 @@ func methodGroupFromName(mn string) string { } func main() { - - comments, groupComments := parseApiASTInfo() + comments, groupComments := parseApiASTInfo(os.Args[1], os.Args[2]) groups := make(map[string]*MethodGroup) - var api struct{ api.FullNode } - t := reflect.TypeOf(api) + var t reflect.Type + var permStruct, commonPermStruct reflect.Type + + switch os.Args[2] { + case 
"FullNode": + t = reflect.TypeOf(new(struct{ api.FullNode })).Elem() + permStruct = reflect.TypeOf(apistruct.FullNodeStruct{}.Internal) + commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal) + case "StorageMiner": + t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem() + permStruct = reflect.TypeOf(apistruct.StorageMinerStruct{}.Internal) + commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal) + case "WorkerAPI": + t = reflect.TypeOf(new(struct{ api.WorkerAPI })).Elem() + permStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal) + commonPermStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal) + default: + panic("unknown type") + } + for i := 0; i < t.NumMethod(); i++ { m := t.Method(i) @@ -336,7 +433,7 @@ func main() { ft := m.Func.Type() for j := 2; j < ft.NumIn(); j++ { inp := ft.In(j) - args = append(args, exampleValue(inp, nil)) + args = append(args, exampleValue(m.Name, inp, nil)) } v, err := json.MarshalIndent(args, "", " ") @@ -344,7 +441,7 @@ func main() { panic(err) } - outv := exampleValue(ft.Out(0), nil) + outv := exampleValue(m.Name, ft.Out(0), nil) ov, err := json.MarshalIndent(outv, "", " ") if err != nil { @@ -377,9 +474,6 @@ func main() { } } - permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal) - commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal) - for _, g := range groupslice { g := g fmt.Printf("## %s\n", g.GroupName) diff --git a/api/test/blockminer.go b/api/test/blockminer.go index 6b28a5794..23af94a36 100644 --- a/api/test/blockminer.go +++ b/api/test/blockminer.go @@ -37,7 +37,12 @@ func (bm *BlockMiner) MineBlocks() { go func() { defer close(bm.done) for atomic.LoadInt64(&bm.mine) == 1 { - time.Sleep(bm.blocktime) + select { + case <-bm.ctx.Done(): + return + case <-time.After(bm.blocktime): + } + nulls := atomic.SwapInt64(&bm.nulls, 0) if err := bm.miner.MineOne(bm.ctx, miner.MineReq{ InjectNulls: abi.ChainEpoch(nulls), diff --git a/api/test/ccupgrade.go 
b/api/test/ccupgrade.go index 75f72d861..4f6b39701 100644 --- a/api/test/ccupgrade.go +++ b/api/test/ccupgrade.go @@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) { ctx := context.Background() - n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] diff --git a/api/test/test.go b/api/test/test.go index bae3d520e..a1b82c590 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -109,7 +109,7 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} var OneFull = DefaultFullOpts(1) var TwoFull = DefaultFullOpts(2) -var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { +var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { return FullNodeOpts{ Opts: func(nodes []TestNode) node.Option { return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ @@ -122,6 +122,25 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { } } +var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts { + return FullNodeOpts{ + Opts: func(nodes []TestNode) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + Network: network.Version6, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version7, + Height: calico, + Migration: stmgr.UpgradeCalico, + }, { + Network: network.Version8, + Height: persian, + }}) + }, + } +} + var MineNext = miner.MineReq{ InjectNulls: 0, Done: func(bool, abi.ChainEpoch, error) {}, diff --git a/api/test/window_post.go b/api/test/window_post.go index 55fc4ad70..ff107ae8d 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -3,18 +3,22 @@ package test import ( "context" 
"fmt" + "sort" "sync/atomic" "strings" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/mock" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -23,6 +27,90 @@ import ( "github.com/filecoin-project/lotus/node/impl" ) +func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + pledge := make(chan struct{}) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + round := 0 + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + + // 3 sealing rounds: before, during after. + if round >= 3 { + continue + } + + head, err := client.ChainHead(ctx) + assert.NoError(t, err) + + // rounds happen every 100 blocks, with a 50 block offset. 
+ if head.Height() >= abi.ChainEpoch(round*500+50) { + round++ + pledge <- struct{}{} + + ver, err := client.StateNetworkVersion(ctx, head.Key()) + assert.NoError(t, err) + switch round { + case 1: + assert.Equal(t, network.Version6, ver) + case 2: + assert.Equal(t, network.Version7, ver) + case 3: + assert.Equal(t, network.Version8, ver) + } + } + + } + }() + + // before. + pledgeSectors(t, ctx, miner, 9, 0, pledge) + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + + for i, id := range s { + info, err := miner.SectorsStatus(ctx, id, true) + require.NoError(t, err) + expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 + if i >= 3 { + // after + expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -63,11 +151,13 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { for i := 0; i < n; i++ { - err := miner.PledgeSector(ctx) - require.NoError(t, err) if i%3 == 0 && blockNotif != nil { <-blockNotif + log.Errorf("WAIT") } + log.Errorf("PLEDGING %d", i) + err := miner.PledgeSector(ctx) + require.NoError(t, err) } for { @@ -126,7 +216,7 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] @@ -209,15 +299,17 @@ func 
testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, // Drop the partition err = secs.ForEach(func(sid uint64) error { - return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sid), + return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sid), + }, }, true) }) require.NoError(t, err) } - var s abi.SectorID + var s storage.SectorRef // Drop 1 sectors from deadline 3 partition 0 { @@ -238,9 +330,11 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, require.NoError(t, err) fmt.Println("the sectors", all) - s = abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sn), + s = storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sn), + }, } err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) diff --git a/build/params_2k.go b/build/params_2k.go index 5a0e8fd61..c86de7ffa 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -7,7 +7,6 @@ import ( "os" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/actors/policy" ) @@ -22,7 +21,9 @@ const UpgradeTapeHeight = -4 var UpgradeActorsV2Height = abi.ChainEpoch(10) var UpgradeLiftoffHeight = abi.ChainEpoch(-5) -const UpgradeKumquatHeight = -6 +const UpgradeKumquatHeight = 15 +const UpgradeCalicoHeight = 20 +const UpgradePersianHeight = 25 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -53,3 +54,5 @@ const SlashablePowerDelay = 20 // Epochs const InteractivePoRepConfidence = 6 + +const BootstrapPeerThreshold = 1 diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 94deedfec..425d23ac1 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ 
-11,7 +11,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/policy" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) @@ -39,12 +38,11 @@ const UpgradeLiftoffHeight = 148888 const UpgradeKumquatHeight = 170000 +const UpgradeCalicoHeight = 265200 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60) + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) - policy.SetSupportedProofTypes( - abi.RegisteredSealProof_StackedDrg32GiBV1, - abi.RegisteredSealProof_StackedDrg64GiBV1, - ) if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) @@ -60,3 +58,5 @@ func init() { const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) const PropagationDelaySecs = uint64(6) + +const BootstrapPeerThreshold = 4 diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index 77fd9256d..bcd82bb48 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -19,3 +19,12 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { func SetAddressNetwork(n address.Network) { address.CurrentNetwork = n } + +func MustParseAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 5070777bd..994c32934 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version6 +const NewestNetworkVersion = network.Version8 const ActorUpgradeNetworkVersion = network.Version4 // Epochs @@ -61,6 +61,9 @@ const TicketRandomnessLookback = abi.ChainEpoch(1) const AddressMainnetEnvVar = "_mainnet_" +// the 'f' prefix doesn't matter +var 
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") + // ///// // Devnet settings diff --git a/build/params_testground.go b/build/params_testground.go index d9893a5f5..0ee986a7c 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -88,13 +88,18 @@ var ( UpgradeActorsV2Height abi.ChainEpoch = 10 UpgradeLiftoffHeight abi.ChainEpoch = -5 UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeCalicoHeight abi.ChainEpoch = -7 + UpgradePersianHeight abi.ChainEpoch = -8 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } - NewestNetworkVersion = network.Version5 + NewestNetworkVersion = network.Version8 ActorUpgradeNetworkVersion = network.Version4 - Devnet = true + Devnet = true + ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") ) + +const BootstrapPeerThreshold = 1 diff --git a/build/version.go b/build/version.go index 80977f2f1..1a6a4e5b7 100644 --- a/build/version.go +++ b/build/version.go @@ -29,7 +29,7 @@ func buildType() string { } // BuildVersion is the local build version, set by build system -const BuildVersion = "1.1.2" +const BuildVersion = "1.2.0" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit @@ -83,9 +83,9 @@ func VersionForType(nodeType NodeType) (Version, error) { // semver versions of the rpc api exposed var ( - FullAPIVersion = newVer(0, 17, 0) - MinerAPIVersion = newVer(0, 17, 0) - WorkerAPIVersion = newVer(0, 16, 0) + FullAPIVersion = newVer(1, 0, 0) + MinerAPIVersion = newVer(1, 0, 0) + WorkerAPIVersion = newVer(1, 0, 0) ) //nolint:varcheck,deadcode diff --git a/chain/actors/builtin/init/diff.go b/chain/actors/builtin/init/diff.go new file mode 100644 index 000000000..593171322 --- /dev/null +++ b/chain/actors/builtin/init/diff.go @@ -0,0 +1,152 @@ +package init + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) { + prem, err := pre.addressMap() + if err != nil { + return nil, err + } + + curm, err := cur.addressMap() + if err != nil { + return nil, err + } + + preRoot, err := prem.Root() + if err != nil { + return nil, err + } + + curRoot, err := curm.Root() + if err != nil { + return nil, err + } + + results := new(AddressMapChanges) + // no change. + if curRoot.Equals(preRoot) { + return results, nil + } + + err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type addressMapDiffer struct { + Results *AddressMapChanges + pre, adter State +} + +type AddressMapChanges struct { + Added []AddressPair + Modified []AddressChange + Removed []AddressPair +} + +func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Added = append(i.Results.Added, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + fromID := new(typegen.CborInt) + if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { + return err + } + fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) + if err != nil { 
+ return err + } + + toID := new(typegen.CborInt) + if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { + return err + } + toIDAddr, err := address.NewIDAddress(uint64(*toID)) + if err != nil { + return err + } + + i.Results.Modified = append(i.Results.Modified, AddressChange{ + From: AddressPair{ + ID: fromIDAddr, + PK: pkAddr, + }, + To: AddressPair{ + ID: toIDAddr, + PK: pkAddr, + }, + }) + return nil +} + +func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Removed = append(i.Results.Removed, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +type AddressChange struct { + From AddressPair + To AddressPair +} + +type AddressPair struct { + ID address.Address + PK address.Address +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 60dbdf4fe..466af6a6c 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -57,4 +57,6 @@ type State interface { // Sets the network's name. This should only be used on upgrade/fork. 
SetNetworkName(name string) error + + addressMap() (adt.Map, error) } diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go index ceb87f970..c019705b1 100644 --- a/chain/actors/builtin/init/v0.go +++ b/chain/actors/builtin/init/v0.go @@ -79,3 +79,7 @@ func (s *state0) Remove(addrs ...address.Address) (err error) { s.State.AddressMap = amr return nil } + +func (s *state0) addressMap() (adt.Map, error) { + return adt0.AsMap(s.store, s.AddressMap) +} diff --git a/chain/actors/builtin/init/v2.go b/chain/actors/builtin/init/v2.go index 5aa0ddc18..420243be4 100644 --- a/chain/actors/builtin/init/v2.go +++ b/chain/actors/builtin/init/v2.go @@ -79,3 +79,7 @@ func (s *state2) Remove(addrs ...address.Address) (err error) { s.State.AddressMap = amr return nil } + +func (s *state2) addressMap() (adt.Map, error) { + return adt2.AsMap(s.store, s.AddressMap) +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 195ca40b9..8bb31f2b4 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -81,6 +81,7 @@ type DealProposals interface { type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsReturn = market0.PublishStorageDealsReturn type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams +type WithdrawBalanceParams = market0.WithdrawBalanceParams type ClientDealProposal = market0.ClientDealProposal diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index f9c6b3da3..2f24e8454 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -4,6 +4,8 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" ) func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { @@ -26,3 +28,42 
@@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) return bitfield.MultiMerge(parts...) } + +// SealProofTypeFromSectorSize returns preferred seal proof type for creating +// new miner actors and new sectors +func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { + switch { + case nv < network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } + case nv >= network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } + } + + return 0, xerrors.Errorf("unsupported network version") +} diff --git a/chain/actors/builtin/multisig/diff.go b/chain/actors/builtin/multisig/diff.go new file mode 100644 index 000000000..680d0870a --- /dev/null +++ b/chain/actors/builtin/multisig/diff.go @@ -0,0 +1,134 @@ +package multisig + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +type PendingTransactionChanges struct { + Added []TransactionChange + Modified []TransactionModification + Removed 
[]TransactionChange +} + +type TransactionChange struct { + TxID int64 + Tx Transaction +} + +type TransactionModification struct { + TxID int64 + From Transaction + To Transaction +} + +func DiffPendingTransactions(pre, cur State) (*PendingTransactionChanges, error) { + results := new(PendingTransactionChanges) + if changed, err := pre.PendingTxnChanged(cur); err != nil { + return nil, err + } else if !changed { // if nothing has changed then return an empty result and bail. + return results, nil + } + + pret, err := pre.transactions() + if err != nil { + return nil, err + } + + curt, err := cur.transactions() + if err != nil { + return nil, err + } + + if err := adt.DiffAdtMap(pret, curt, &transactionDiffer{results, pre, cur}); err != nil { + return nil, err + } + return results, nil +} + +type transactionDiffer struct { + Results *PendingTransactionChanges + pre, after State +} + +func (t *transactionDiffer) AsKey(key string) (abi.Keyer, error) { + txID, err := abi.ParseIntKey(key) + if err != nil { + return nil, err + } + return abi.IntKey(txID), nil +} + +func (t *transactionDiffer) Add(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.after.decodeTransaction(val) + if err != nil { + return err + } + t.Results.Added = append(t.Results.Added, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} + +func (t *transactionDiffer) Modify(key string, from, to *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + + txFrom, err := t.pre.decodeTransaction(from) + if err != nil { + return err + } + + txTo, err := t.after.decodeTransaction(to) + if err != nil { + return err + } + + if approvalsChanged(txFrom.Approved, txTo.Approved) { + t.Results.Modified = append(t.Results.Modified, TransactionModification{ + TxID: txID, + From: txFrom, + To: txTo, + }) + } + + return nil +} + +func approvalsChanged(from, to []address.Address) bool { + if 
len(from) != len(to) { + return true + } + for idx := range from { + if from[idx] != to[idx] { + return true + } + } + return false +} + +func (t *transactionDiffer) Remove(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.pre.decodeTransaction(val) + if err != nil { + return err + } + t.Results.Removed = append(t.Results.Removed, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} diff --git a/chain/actors/builtin/multisig/state.go b/chain/actors/builtin/multisig/state.go index 89a7eedad..fea42ba5f 100644 --- a/chain/actors/builtin/multisig/state.go +++ b/chain/actors/builtin/multisig/state.go @@ -1,6 +1,7 @@ package multisig import ( + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -47,6 +48,10 @@ type State interface { Signers() ([]address.Address, error) ForEachPendingTxn(func(id int64, txn Transaction) error) error + PendingTxnChanged(State) (bool, error) + + transactions() (adt.Map, error) + decodeTransaction(val *cbg.Deferred) (Transaction, error) } type Transaction = msig0.Transaction diff --git a/chain/actors/builtin/multisig/state0.go b/chain/actors/builtin/multisig/state0.go index c934343e7..e6f9a9c36 100644 --- a/chain/actors/builtin/multisig/state0.go +++ b/chain/actors/builtin/multisig/state0.go @@ -1,17 +1,20 @@ package multisig import ( + "bytes" "encoding/binary" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/actors/adt" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + multisig0 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" ) var _ State = (*state0)(nil) @@ -68,3 +71,24 @@ func (s *state0) ForEachPendingTxn(cb func(id 
int64, txn Transaction) error) err return cb(txid, (Transaction)(out)) }) } + +func (s *state0) PendingTxnChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other0.PendingTxns), nil +} + +func (s *state0) transactions() (adt.Map, error) { + return adt0.AsMap(s.store, s.PendingTxns) +} + +func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx multisig0.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} diff --git a/chain/actors/builtin/multisig/state2.go b/chain/actors/builtin/multisig/state2.go index a78b07d55..628da3f2c 100644 --- a/chain/actors/builtin/multisig/state2.go +++ b/chain/actors/builtin/multisig/state2.go @@ -1,11 +1,13 @@ package multisig import ( + "bytes" "encoding/binary" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -68,3 +70,24 @@ func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err return cb(txid, (Transaction)(out)) }) } + +func (s *state2) PendingTxnChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other2.PendingTxns), nil +} + +func (s *state2) transactions() (adt.Map, error) { + return adt2.AsMap(s.store, s.PendingTxns) +} + +func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig2.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} diff --git a/chain/actors/builtin/reward/v2.go b/chain/actors/builtin/reward/v2.go index b7cb49102..c9a591532 
100644 --- a/chain/actors/builtin/reward/v2.go +++ b/chain/actors/builtin/reward/v2.go @@ -28,7 +28,7 @@ type state2 struct { store adt.Store } -func (s *state2) ThisEpochReward() (abi.StoragePower, error) { +func (s *state2) ThisEpochReward() (abi.TokenAmount, error) { return s.State.ThisEpochReward, nil } @@ -55,11 +55,11 @@ func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) { return s.State.EffectiveNetworkTime, nil } -func (s *state2) CumsumBaseline() (abi.StoragePower, error) { +func (s *state2) CumsumBaseline() (reward2.Spacetime, error) { return s.State.CumsumBaseline, nil } -func (s *state2) CumsumRealized() (abi.StoragePower, error) { +func (s *state2) CumsumRealized() (reward2.Spacetime, error) { return s.State.CumsumRealized, nil } diff --git a/chain/actors/params.go b/chain/actors/params.go index e14dcafc9..6dc0b1084 100644 --- a/chain/actors/params.go +++ b/chain/actors/params.go @@ -3,6 +3,8 @@ package actors import ( "bytes" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/chain/actors/aerrors" cbg "github.com/whyrusleeping/cbor-gen" ) @@ -11,7 +13,7 @@ func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { // TODO: shouldnt this be a fatal error? - return nil, aerrors.Absorb(err, 1, "failed to encode parameter") + return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") } return buf.Bytes(), nil } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index c1a971db5..a19a43aaa 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -26,22 +26,29 @@ const ( // SetSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. 
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { - newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types)) - for _, t := range types { - newTypes[t] = struct{}{} - } - // Set for all miner versions. - miner0.SupportedProofTypes = newTypes - miner2.SupportedProofTypes = newTypes + miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + AddSupportedProofTypes(types...) } // AddSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { for _, t := range types { + if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + panic("must specify v1 proof types only") + } // Set for all miner versions. miner0.SupportedProofTypes[t] = struct{}{} - miner2.SupportedProofTypes[t] = struct{}{} + miner2.PreCommitSealProofTypesV0[t] = struct{}{} + + miner2.PreCommitSealProofTypesV7[t] = struct{}{} + miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} } } @@ -133,9 +140,9 @@ func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { } func GetDefaultSectorSize() abi.SectorSize { - // supported proof types are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes)) - for spt := range miner2.SupportedProofTypes { + // supported sector sizes are the same across versions. 
+ szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8)) + for spt := range miner2.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go index af600cc75..24e47aaa0 100644 --- a/chain/actors/policy/policy_test.go +++ b/chain/actors/policy/policy_test.go @@ -44,7 +44,7 @@ func TestSupportedProofTypes(t *testing.T) { // Tests assumptions about policies being the same between actor versions. func TestAssumptions(t *testing.T) { - require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes) + require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0) require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay) require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension) require.Equal(t, miner0.ChainFinality, miner2.ChainFinality) @@ -57,10 +57,10 @@ func TestAssumptions(t *testing.T) { } func TestPartitionSizes(t *testing.T) { - for p := range abi.PoStSealProofTypes { - sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p) + for _, p := range abi.SealProofInfos { + sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof) require.NoError(t, err) - sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p) + sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof) if err != nil { // new proof type. 
continue diff --git a/chain/actors/version.go b/chain/actors/version.go index fe16d521e..1cafa45c9 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -18,7 +18,7 @@ func VersionForNetwork(version network.Version) Version { switch version { case network.Version0, network.Version1, network.Version2, network.Version3: return Version0 - case network.Version4, network.Version5, network.Version6: + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8: return Version2 default: panic(fmt.Sprintf("unsupported network version %d", version)) diff --git a/chain/events/state/fastapi.go b/chain/events/state/fastapi.go new file mode 100644 index 000000000..9375d9d78 --- /dev/null +++ b/chain/events/state/fastapi.go @@ -0,0 +1,34 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/chain/types" +) + +type FastChainApiAPI interface { + ChainAPI + + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +type fastAPI struct { + FastChainApiAPI +} + +func WrapFastAPI(api FastChainApiAPI) ChainAPI { + return &fastAPI{ + api, + } +} + +func (a *fastAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + ts, err := a.FastChainApiAPI.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + + return a.FastChainApiAPI.StateGetActor(ctx, actor, ts.Parents()) +} diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index 99b8480dc..551b776c2 100644 --- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -1,7 +1,6 @@ package state import ( - "bytes" "context" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -10,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" cbor "github.com/ipfs/go-ipld-cbor" - typegen "github.com/whyrusleeping/cbor-gen" 
"github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -419,179 +417,17 @@ type AddressPair struct { PK address.Address } -type InitActorAddressChanges struct { - Added []AddressPair - Modified []AddressChange - Removed []AddressPair -} - -type AddressChange struct { - From AddressPair - To AddressPair -} - type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error) -func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) { - addr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return nil, err - } - return abi.AddrKey(addr), nil -} - -func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - id := new(typegen.CborInt) - if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - idAddr, err := address.NewIDAddress(uint64(*id)) - if err != nil { - return err - } - i.Added = append(i.Added, AddressPair{ - ID: idAddr, - PK: pkAddr, - }) - return nil -} - -func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - - fromID := new(typegen.CborInt) - if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { - return err - } - fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) - if err != nil { - return err - } - - toID := new(typegen.CborInt) - if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { - return err - } - toIDAddr, err := address.NewIDAddress(uint64(*toID)) - if err != nil { - return err - } - - i.Modified = append(i.Modified, AddressChange{ - From: AddressPair{ - ID: fromIDAddr, - PK: pkAddr, - }, - To: AddressPair{ - ID: toIDAddr, - PK: pkAddr, - }, - }) - return nil -} - -func (i *InitActorAddressChanges) Remove(key 
string, val *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - id := new(typegen.CborInt) - if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - idAddr, err := address.NewIDAddress(uint64(*id)) - if err != nil { - return err - } - i.Removed = append(i.Removed, AddressPair{ - ID: idAddr, - PK: pkAddr, - }) - return nil -} - func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc { return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) { - addressChanges := &InitActorAddressChanges{ - Added: []AddressPair{}, - Modified: []AddressChange{}, - Removed: []AddressPair{}, - } - - err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error { - oldIdAddress, err := address.NewIDAddress(uint64(oldId)) - if err != nil { - return err - } - - newIdAddress, found, err := newState.ResolveAddress(oldAddress) - if err != nil { - return err - } - - if !found { - addressChanges.Removed = append(addressChanges.Removed, AddressPair{ - ID: oldIdAddress, - PK: oldAddress, - }) - } - - if oldIdAddress != newIdAddress { - addressChanges.Modified = append(addressChanges.Modified, AddressChange{ - From: AddressPair{ - ID: oldIdAddress, - PK: oldAddress, - }, - To: AddressPair{ - ID: newIdAddress, - PK: oldAddress, - }, - }) - } - - return nil - }) - + addressChanges, err := init_.DiffAddressMap(oldState, newState) if err != nil { return false, nil, err } - - err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error { - newIdAddress, err := address.NewIDAddress(uint64(newId)) - if err != nil { - return err - } - - _, found, err := newState.ResolveAddress(newAddress) - if err != nil { - return err - } - - if !found { - addressChanges.Added = append(addressChanges.Added, AddressPair{ - ID: newIdAddress, - PK: newAddress, - }) - } - - return nil - }) - - if err != nil { - return 
false, nil, err - } - - if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 { + if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 { return false, nil, nil } - return true, addressChanges, nil } } diff --git a/chain/exchange/client.go b/chain/exchange/client.go index cb030bcf7..fa9ed2974 100644 --- a/chain/exchange/client.go +++ b/chain/exchange/client.go @@ -7,7 +7,6 @@ import ( "math/rand" "time" - "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -412,11 +411,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque return nil, xerrors.Errorf("failed to open stream to peer: %w", err) } - defer func() { - // Note: this will become just stream.Close once we've completed the go-libp2p migration to - // go-libp2p-core 0.7.0 - go helpers.FullClose(stream) //nolint:errcheck - }() + defer stream.Close() //nolint:errcheck // Write request. 
_ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline)) diff --git a/chain/exchange/server.go b/chain/exchange/server.go index dcdb5b3a5..31eec46ca 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/helpers" inet "github.com/libp2p/go-libp2p-core/network" ) @@ -40,16 +39,14 @@ func (s *server) HandleStream(stream inet.Stream) { ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream") defer span.End() - // Note: this will become just stream.Close once we've completed the go-libp2p migration to - // go-libp2p-core 0.7.0 - defer helpers.FullClose(stream) //nolint:errcheck + defer stream.Close() //nolint:errcheck var req Request if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil { log.Warnf("failed to read block sync request: %s", err) return } - log.Infow("block sync request", + log.Debugw("block sync request", "start", req.Head, "len", req.Length) resp, err := s.processRequest(ctx, &req) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index d56f285a0..ef717dc75 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "io" "io/ioutil" "sync/atomic" "time" @@ -138,12 +139,20 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) } - bds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { - return nil, xerrors.Errorf("failed to get blocks datastore: %w", err) + return nil, err } - bs := mybs{blockstore.NewBlockstore(bds)} + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + bs = mybs{bs} ks, err := lr.KeyStore() if err != nil { @@ -236,7 +245,7 @@ func NewGeneratorWithSectors(numSectors int) 
(*ChainGen, error) { return nil, xerrors.Errorf("make genesis block failed: %w", err) } - cs := store.NewChainStore(bs, ds, sys, j) + cs := store.NewChainStore(bs, bs, ds, sys, j) genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) diff --git a/chain/gen/genesis/t00_system.go b/chain/gen/genesis/f00_system.go similarity index 100% rename from chain/gen/genesis/t00_system.go rename to chain/gen/genesis/f00_system.go diff --git a/chain/gen/genesis/t01_init.go b/chain/gen/genesis/f01_init.go similarity index 100% rename from chain/gen/genesis/t01_init.go rename to chain/gen/genesis/f01_init.go diff --git a/chain/gen/genesis/t02_reward.go b/chain/gen/genesis/f02_reward.go similarity index 100% rename from chain/gen/genesis/t02_reward.go rename to chain/gen/genesis/f02_reward.go diff --git a/chain/gen/genesis/t03_cron.go b/chain/gen/genesis/f03_cron.go similarity index 100% rename from chain/gen/genesis/t03_cron.go rename to chain/gen/genesis/f03_cron.go diff --git a/chain/gen/genesis/t04_power.go b/chain/gen/genesis/f04_power.go similarity index 100% rename from chain/gen/genesis/t04_power.go rename to chain/gen/genesis/f04_power.go diff --git a/chain/gen/genesis/t05_market.go b/chain/gen/genesis/f05_market.go similarity index 100% rename from chain/gen/genesis/t05_market.go rename to chain/gen/genesis/f05_market.go diff --git a/chain/gen/genesis/t06_vreg.go b/chain/gen/genesis/f06_vreg.go similarity index 100% rename from chain/gen/genesis/t06_vreg.go rename to chain/gen/genesis/f06_vreg.go diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 6a1090784..e441af7ae 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -482,7 +482,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto } // temp chainstore - cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys, j) + cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), 
sys, j) // Verify PreSealed Data stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs) diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index be83a8711..850c2f39f 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -23,8 +23,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" @@ -101,7 +99,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid i := i m := m - spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize) + spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, GenesisNetworkVersion) if err != nil { return cid.Undef, err } diff --git a/chain/market/cbor_gen.go b/chain/market/cbor_gen.go new file mode 100644 index 000000000..397b5477a --- /dev/null +++ b/chain/market/cbor_gen.go @@ -0,0 +1,112 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package market + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufFundedAddressState = []byte{131} + +func (t *FundedAddressState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufFundedAddressState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(w); err != nil { + return err + } + + // t.AmtReserved (big.Int) (struct) + if err := t.AmtReserved.MarshalCBOR(w); err != nil { + return err + } + + // t.MsgCid (cid.Cid) (struct) + + if t.MsgCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.MsgCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err) + } + } + + return nil +} + +func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error { + *t = FundedAddressState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + // t.AmtReserved (big.Int) (struct) + + { + + if err := t.AmtReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err) + } + + } + // t.MsgCid (cid.Cid) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to 
read cid field t.MsgCid: %w", err) + } + + t.MsgCid = &c + } + + } + return nil +} diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go new file mode 100644 index 000000000..5df7589fa --- /dev/null +++ b/chain/market/fundmanager.go @@ -0,0 +1,707 @@ +package market + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" + "go.uber.org/fx" + "golang.org/x/xerrors" +) + +var log = logging.Logger("market_adapter") + +// API is the fx dependencies need to run a fund manager +type FundManagerAPI struct { + fx.In + + full.StateAPI + full.MpoolAPI +} + +// fundManagerAPI is the specific methods called by the FundManager +// (used by the tests) +type fundManagerAPI interface { + MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) +} + +// FundManager keeps track of funds in a set of addresses +type FundManager struct { + ctx context.Context + shutdown context.CancelFunc + api fundManagerAPI + str *Store + + lk sync.Mutex + fundedAddrs map[address.Address]*fundedAddress +} + +func NewFundManager(lc fx.Lifecycle, api FundManagerAPI, ds dtypes.MetadataDS) *FundManager { + fm := newFundManager(&api, ds) + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + return fm.Start() + }, + 
OnStop: func(ctx context.Context) error { + fm.Stop() + return nil + }, + }) + return fm +} + +// newFundManager is used by the tests +func newFundManager(api fundManagerAPI, ds datastore.Batching) *FundManager { + ctx, cancel := context.WithCancel(context.Background()) + return &FundManager{ + ctx: ctx, + shutdown: cancel, + api: api, + str: newStore(ds), + fundedAddrs: make(map[address.Address]*fundedAddress), + } +} + +func (fm *FundManager) Stop() { + fm.shutdown() +} + +func (fm *FundManager) Start() error { + fm.lk.Lock() + defer fm.lk.Unlock() + + // TODO: + // To save memory: + // - in State() only load addresses with in-progress messages + // - load the others just-in-time from getFundedAddress + // - delete(fm.fundedAddrs, addr) when the queue has been processed + return fm.str.forEach(func(state *FundedAddressState) { + fa := newFundedAddress(fm, state.Addr) + fa.state = state + fm.fundedAddrs[fa.state.Addr] = fa + fa.start() + }) +} + +// Creates a fundedAddress if it doesn't already exist, and returns it +func (fm *FundManager) getFundedAddress(addr address.Address) *fundedAddress { + fm.lk.Lock() + defer fm.lk.Unlock() + + fa, ok := fm.fundedAddrs[addr] + if !ok { + fa = newFundedAddress(fm, addr) + fm.fundedAddrs[addr] = fa + } + return fa +} + +// Reserve adds amt to `reserved`. If there are not enough available funds for +// the address, submits a message on chain to top up available funds. +// Returns the cid of the message that was submitted on chain, or cid.Undef if +// the required funds were already available. +func (fm *FundManager) Reserve(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).reserve(ctx, wallet, amt) +} + +// Subtract from `reserved`. +func (fm *FundManager) Release(addr address.Address, amt abi.TokenAmount) error { + return fm.getFundedAddress(addr).release(amt) +} + +// Withdraw unreserved funds. 
Only succeeds if there are enough unreserved +// funds for the address. +// Returns the cid of the message that was submitted on chain. +func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt) +} + +// FundedAddressState keeps track of the state of an address with funds in the +// datastore +type FundedAddressState struct { + Addr address.Address + // AmtReserved is the amount that must be kept in the address (cannot be + // withdrawn) + AmtReserved abi.TokenAmount + // MsgCid is the cid of an in-progress on-chain message + MsgCid *cid.Cid +} + +// fundedAddress keeps track of the state and request queues for a +// particular address +type fundedAddress struct { + ctx context.Context + env *fundManagerEnvironment + str *Store + + lk sync.Mutex + state *FundedAddressState + + // Note: These request queues are ephemeral, they are not saved to store + reservations []*fundRequest + releases []*fundRequest + withdrawals []*fundRequest + + // Used by the tests + onProcessStartListener func() bool +} + +func newFundedAddress(fm *FundManager, addr address.Address) *fundedAddress { + return &fundedAddress{ + ctx: fm.ctx, + env: &fundManagerEnvironment{api: fm.api}, + str: fm.str, + state: &FundedAddressState{ + Addr: addr, + AmtReserved: abi.NewTokenAmount(0), + }, + } +} + +// If there is an in-progress on-chain message, don't submit any more messages +// on chain until it completes +func (a *fundedAddress) start() { + a.lk.Lock() + defer a.lk.Unlock() + + if a.state.MsgCid != nil { + a.debugf("restart: wait for %s", a.state.MsgCid) + a.startWaitForResults(*a.state.MsgCid) + } +} + +func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.reservations) +} + +func (a *fundedAddress) release(amt abi.TokenAmount) error { + _, err := 
a.requestAndWait(context.Background(), address.Undef, amt, &a.releases) + return err +} + +func (a *fundedAddress) withdraw(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.withdrawals) +} + +func (a *fundedAddress) requestAndWait(ctx context.Context, wallet address.Address, amt abi.TokenAmount, reqs *[]*fundRequest) (cid.Cid, error) { + // Create a request and add it to the request queue + req := newFundRequest(ctx, wallet, amt) + + a.lk.Lock() + *reqs = append(*reqs, req) + a.lk.Unlock() + + // Process the queue + go a.process() + + // Wait for the results + select { + case <-ctx.Done(): + return cid.Undef, ctx.Err() + case r := <-req.Result: + return r.msgCid, r.err + } +} + +// Used by the tests +func (a *fundedAddress) onProcessStart(fn func() bool) { + a.lk.Lock() + defer a.lk.Unlock() + + a.onProcessStartListener = fn +} + +// Process queued requests +func (a *fundedAddress) process() { + a.lk.Lock() + defer a.lk.Unlock() + + // Used by the tests + if a.onProcessStartListener != nil { + done := a.onProcessStartListener() + if !done { + return + } + a.onProcessStartListener = nil + } + + // Check if we're still waiting for the response to a message + if a.state.MsgCid != nil { + return + } + + // Check if there's anything to do + haveReservations := len(a.reservations) > 0 || len(a.releases) > 0 + haveWithdrawals := len(a.withdrawals) > 0 + if !haveReservations && !haveWithdrawals { + return + } + + // Process reservations / releases + if haveReservations { + res, err := a.processReservations(a.reservations, a.releases) + if err == nil { + a.applyStateChange(res.msgCid, res.amtReserved) + } + a.reservations = filterOutProcessedReqs(a.reservations) + a.releases = filterOutProcessedReqs(a.releases) + } + + // If there was no message sent on chain by adding reservations, and all + // reservations have completed processing, process withdrawals + if haveWithdrawals && a.state.MsgCid 
== nil && len(a.reservations) == 0 { + withdrawalCid, err := a.processWithdrawals(a.withdrawals) + if err == nil && withdrawalCid != cid.Undef { + a.applyStateChange(&withdrawalCid, types.EmptyInt) + } + a.withdrawals = filterOutProcessedReqs(a.withdrawals) + } + + // If a message was sent on-chain + if a.state.MsgCid != nil { + // Start waiting for results of message (async) + a.startWaitForResults(*a.state.MsgCid) + } + + // Process any remaining queued requests + go a.process() +} + +// Filter out completed requests +func filterOutProcessedReqs(reqs []*fundRequest) []*fundRequest { + filtered := make([]*fundRequest, 0, len(reqs)) + for _, req := range reqs { + if !req.Completed() { + filtered = append(filtered, req) + } + } + return filtered +} + +// Apply the results of processing queues and save to the datastore +func (a *fundedAddress) applyStateChange(msgCid *cid.Cid, amtReserved abi.TokenAmount) { + a.state.MsgCid = msgCid + if !amtReserved.Nil() { + a.state.AmtReserved = amtReserved + } + a.saveState() +} + +// Clear the pending message cid so that a new message can be sent +func (a *fundedAddress) clearWaitState() { + a.state.MsgCid = nil + a.saveState() +} + +// Save state to datastore +func (a *fundedAddress) saveState() { + // Not much we can do if saving to the datastore fails, just log + err := a.str.save(a.state) + if err != nil { + log.Errorf("saving state to store for addr %s: %w", a.state.Addr, err) + } +} + +// The result of processing the reservation / release queues +type processResult struct { + // Requests that completed without adding funds + covered []*fundRequest + // Requests that added funds + added []*fundRequest + + // The new reserved amount + amtReserved abi.TokenAmount + // The message cid, if a message was submitted on-chain + msgCid *cid.Cid +} + +// process reservations and releases, and return the resulting changes to state +func (a *fundedAddress) processReservations(reservations []*fundRequest, releases []*fundRequest) (pr 
*processResult, prerr error) { + // When the function returns + defer func() { + // If there's an error, mark all requests as errored + if prerr != nil { + for _, req := range append(reservations, releases...) { + req.Complete(cid.Undef, prerr) + } + return + } + + // Complete all release requests + for _, req := range releases { + req.Complete(cid.Undef, nil) + } + + // Complete all requests that were covered by released amounts + for _, req := range pr.covered { + req.Complete(cid.Undef, nil) + } + + // If a message was sent + if pr.msgCid != nil { + // Complete all add funds requests + for _, req := range pr.added { + req.Complete(*pr.msgCid, nil) + } + } + }() + + // Split reservations into those that are covered by released amounts, + // and those to add to the reserved amount. + // Note that we process requests from the same wallet in batches. So some + // requests may not be included in covered if they don't match the first + // covered request's wallet. These will be processed on a subsequent + // invocation of processReservations. 
+ toCancel, toAdd, reservedDelta := splitReservations(reservations, releases) + + // Apply the reserved delta to the reserved amount + reserved := types.BigAdd(a.state.AmtReserved, reservedDelta) + if reserved.LessThan(abi.NewTokenAmount(0)) { + reserved = abi.NewTokenAmount(0) + } + res := &processResult{ + amtReserved: reserved, + covered: toCancel, + } + + // Work out the amount to add to the balance + amtToAdd := abi.NewTokenAmount(0) + if len(toAdd) > 0 && reserved.GreaterThan(abi.NewTokenAmount(0)) { + // Get available funds for address + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return res, err + } + + // amount to add = new reserved amount - available + amtToAdd = types.BigSub(reserved, avail) + a.debugf("reserved %d - avail %d = to add %d", reserved, avail, amtToAdd) + } + + // If there's nothing to add to the balance, bail out + if amtToAdd.LessThanEqual(abi.NewTokenAmount(0)) { + res.covered = append(res.covered, toAdd...) + return res, nil + } + + // Add funds to address + a.debugf("add funds %d", amtToAdd) + addFundsCid, err := a.env.AddFunds(a.ctx, toAdd[0].Wallet, a.state.Addr, amtToAdd) + if err != nil { + return res, err + } + + // Mark reservation requests as complete + res.added = toAdd + + // Save the message CID to state + res.msgCid = &addFundsCid + return res, nil +} + +// Split reservations into those that are under the total release amount +// (covered) and those that exceed it (to add). +// Note that we process requests from the same wallet in batches. So some +// requests may not be included in covered if they don't match the first +// covered request's wallet. 
+func splitReservations(reservations []*fundRequest, releases []*fundRequest) ([]*fundRequest, []*fundRequest, abi.TokenAmount) { + toCancel := make([]*fundRequest, 0, len(reservations)) + toAdd := make([]*fundRequest, 0, len(reservations)) + toAddAmt := abi.NewTokenAmount(0) + + // Sum release amounts + releaseAmt := abi.NewTokenAmount(0) + for _, req := range releases { + releaseAmt = types.BigAdd(releaseAmt, req.Amount()) + } + + // We only want to combine requests that come from the same wallet + batchWallet := address.Undef + for _, req := range reservations { + amt := req.Amount() + + // If the amount to add to the reserve is cancelled out by a release + if amt.LessThanEqual(releaseAmt) { + // Cancel the request and update the release total + releaseAmt = types.BigSub(releaseAmt, amt) + toCancel = append(toCancel, req) + continue + } + + // The amount to add is greater that the release total so we want + // to send an add funds request + + // The first time the wallet will be undefined + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If this request's wallet is the same as the batch wallet, + // the requests will be combined + if batchWallet == req.Wallet { + delta := types.BigSub(amt, releaseAmt) + toAddAmt = types.BigAdd(toAddAmt, delta) + releaseAmt = abi.NewTokenAmount(0) + toAdd = append(toAdd, req) + } + } + + // The change in the reserved amount is "amount to add" - "amount to release" + reservedDelta := types.BigSub(toAddAmt, releaseAmt) + + return toCancel, toAdd, reservedDelta +} + +// process withdrawal queue +func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid cid.Cid, prerr error) { + // If there's an error, mark all withdrawal requests as errored + defer func() { + if prerr != nil { + for _, req := range withdrawals { + req.Complete(cid.Undef, prerr) + } + } + }() + + // Get the net available balance + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return cid.Undef, 
err + } + + netAvail := types.BigSub(avail, a.state.AmtReserved) + + // Fit as many withdrawals as possible into the available balance, and fail + // the rest + withdrawalAmt := abi.NewTokenAmount(0) + allowedAmt := abi.NewTokenAmount(0) + allowed := make([]*fundRequest, 0, len(withdrawals)) + var batchWallet address.Address + for _, req := range withdrawals { + amt := req.Amount() + if amt.IsZero() { + // If the context for the request was cancelled, bail out + req.Complete(cid.Undef, err) + continue + } + + // If the amount would exceed the available amount, complete the + // request with an error + newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt) + if newWithdrawalAmt.GreaterThan(netAvail) { + err := xerrors.Errorf("insufficient funds for withdrawal of %d", amt) + a.debugf("%s", err) + req.Complete(cid.Undef, err) + continue + } + + // If this is the first allowed withdrawal request in this batch, save + // its wallet address + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If the request wallet doesn't match the batch wallet, bail out + // (the withdrawal will be processed after the current batch has + // completed) + if req.Wallet != batchWallet { + continue + } + + // Include this withdrawal request in the batch + withdrawalAmt = newWithdrawalAmt + a.debugf("withdraw %d", amt) + allowed = append(allowed, req) + allowedAmt = types.BigAdd(allowedAmt, amt) + } + + // Check if there is anything to withdraw. 
+ // Note that if the context for a request is cancelled, + // req.Amount() returns zero + if allowedAmt.Equals(abi.NewTokenAmount(0)) { + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(cid.Undef, nil) + } + return cid.Undef, nil + } + + // Withdraw funds + a.debugf("withdraw funds %d", allowedAmt) + withdrawFundsCid, err := a.env.WithdrawFunds(a.ctx, allowed[0].Wallet, a.state.Addr, allowedAmt) + if err != nil { + return cid.Undef, err + } + + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(withdrawFundsCid, nil) + } + + // Save the message CID to state + return withdrawFundsCid, nil +} + +// asynchonously wait for results of message +func (a *fundedAddress) startWaitForResults(msgCid cid.Cid) { + go func() { + err := a.env.WaitMsg(a.ctx, msgCid) + if err != nil { + // We don't really care about the results here, we're just waiting + // so as to only process one on-chain message at a time + log.Errorf("waiting for results of message %s for addr %s: %w", msgCid, a.state.Addr, err) + } + + a.lk.Lock() + a.debugf("complete wait") + a.clearWaitState() + a.lk.Unlock() + + a.process() + }() +} + +func (a *fundedAddress) debugf(args ...interface{}) { + fmtStr := args[0].(string) + args = args[1:] + log.Debugf(a.state.Addr.String()+": "+fmtStr, args...) 
+} + +// The result of a fund request +type reqResult struct { + msgCid cid.Cid + err error +} + +// A request to change funds +type fundRequest struct { + ctx context.Context + amt abi.TokenAmount + completed chan struct{} + Wallet address.Address + Result chan reqResult +} + +func newFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount) *fundRequest { + return &fundRequest{ + ctx: ctx, + amt: amt, + Wallet: wallet, + Result: make(chan reqResult), + completed: make(chan struct{}), + } +} + +// Amount returns zero if the context has expired +func (frp *fundRequest) Amount() abi.TokenAmount { + if frp.ctx.Err() != nil { + return abi.NewTokenAmount(0) + } + return frp.amt +} + +// Complete is called with the message CID when the funds request has been +// started or with the error if there was an error +func (frp *fundRequest) Complete(msgCid cid.Cid, err error) { + select { + case <-frp.completed: + case <-frp.ctx.Done(): + case frp.Result <- reqResult{msgCid: msgCid, err: err}: + } + close(frp.completed) +} + +// Completed indicates if Complete has already been called +func (frp *fundRequest) Completed() bool { + select { + case <-frp.completed: + return true + default: + return false + } +} + +// fundManagerEnvironment simplifies some API calls +type fundManagerEnvironment struct { + api fundManagerAPI +} + +func (env *fundManagerEnvironment) AvailableFunds(ctx context.Context, addr address.Address) (abi.TokenAmount, error) { + bal, err := env.api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + return abi.NewTokenAmount(0), err + } + + return types.BigSub(bal.Escrow, bal.Locked), nil +} + +func (env *fundManagerEnvironment) AddFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: 
market.Address, + From: wallet, + Value: amt, + Method: market.Methods.AddBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WithdrawFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&market.WithdrawBalanceParams{ + ProviderOrClientAddress: addr, + Amount: amt, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("serializing params: %w", err) + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: market.Address, + From: wallet, + Value: types.NewInt(0), + Method: market.Methods.WithdrawBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error { + _, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence) + return err +} diff --git a/chain/market/fundmanager_test.go b/chain/market/fundmanager_test.go new file mode 100644 index 000000000..ac6b2a405 --- /dev/null +++ b/chain/market/fundmanager_test.go @@ -0,0 +1,820 @@ +package market + +import ( + "bytes" + "context" + "sync" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" +) + +// TestFundManagerBasic verifies that the basic fund manager operations work +func TestFundManagerBasic(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // 
Reserve 10 + // balance: 0 -> 10 + // reserved: 0 -> 10 + amt := abi.NewTokenAmount(10) + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg := s.mockApi.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Reserve 7 + // balance: 10 -> 17 + // reserved: 10 -> 17 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockApi.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Release 5 + // balance: 17 + // reserved: 17 -> 12 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Withdraw 2 + // balance: 17 -> 15 + // reserved: 12 + amt = abi.NewTokenAmount(2) + sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockApi.getSentMessage(sentinel) + checkWithdrawMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Reserve 3 + // balance: 15 + // reserved: 12 -> 15 + // Note: reserved (15) is <= balance (15) so should not send on-chain + // message + msgCount := s.mockApi.messageCount() + amt = abi.NewTokenAmount(3) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + require.Equal(t, msgCount, s.mockApi.messageCount()) + require.Equal(t, sentinel, cid.Undef) + + // Reserve 1 + // balance: 15 -> 16 + // reserved: 15 -> 16 + // Note: reserved (16) is above balance (15) so *should* send on-chain + // message to top up balance + amt = abi.NewTokenAmount(1) + topUp := abi.NewTokenAmount(1) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + s.mockApi.completeMsg(sentinel) + msg = s.mockApi.getSentMessage(sentinel) + checkAddMessageFields(t, msg, 
s.walletAddr, s.acctAddr, topUp) + + // Withdraw 1 + // balance: 16 + // reserved: 16 + // Note: Expect failure because there is no available balance to withdraw: + // balance - reserved = 16 - 16 = 0 + amt = abi.NewTokenAmount(1) + sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.Error(t, err) +} + +// TestFundManagerParallel verifies that operations can be run in parallel +func TestFundManagerParallel(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Wait until all the subsequent requests are queued up + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == 1 && len(fa.reservations) == 2 && len(fa.releases) == 1 { + close(queueReady) + return true + } + return false + }) + + // Withdraw 5 (should not run until after reserves / releases) + withdrawReady := make(chan error) + go func() { + amt = abi.NewTokenAmount(5) + _, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawReady <- err + }() + + reserveSentinels := make(chan cid.Cid) + + // Reserve 3 + go func() { + amt := abi.NewTokenAmount(3) + sentinelReserve3, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve3 + }() + + // Reserve 5 + go func() { + amt := abi.NewTokenAmount(5) + sentinelReserve5, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve5 + }() + + // Release 2 + go func() { + amt := abi.NewTokenAmount(2) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + }() + + // Everything is queued up + <-queueReady + + // Complete the "Reserve 10" message + s.mockApi.completeMsg(sentinelReserve10) + msg := s.mockApi.getSentMessage(sentinelReserve10) + 
checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(10)) + + // The other requests should now be combined and be submitted on-chain as + // a single message + rs1 := <-reserveSentinels + rs2 := <-reserveSentinels + require.Equal(t, rs1, rs2) + + // Withdraw should not have been called yet, because reserve / release + // requests run first + select { + case <-withdrawReady: + require.Fail(t, "Withdraw should run after reserve / release") + default: + } + + // Complete the message + s.mockApi.completeMsg(rs1) + msg = s.mockApi.getSentMessage(rs1) + + // "Reserve 3" +3 + // "Reserve 5" +5 + // "Release 2" -2 + // Result: 6 + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(6)) + + // Expect withdraw to fail because not enough available funds + err = <-withdrawReady + require.Error(t, err) +} + +// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet +func TestFundManagerReserveByWallet(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + + // Wait until all the reservation requests are queued up + walletAQueuedUp := make(chan struct{}) + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.reservations) == 1 { + close(walletAQueuedUp) + } + if len(fa.reservations) == 3 { + close(queueReady) + return true + } + return false + }) + + type reserveResult struct { + ws cid.Cid + err error + } + results := make(chan *reserveResult) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Reserve 1 + sentinelA1, err := s.fm.Reserve(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &reserveResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go 
func() { + // Wait for reservation for wallet A to be queued up + <-walletAQueuedUp + + // Wallet B: Reserve 2 + go func() { + sentinelB1, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &reserveResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Reserve 3 + sentinelB2, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &reserveResult{ + ws: sentinelB2, + err: err, + } + }() + + // All reservation requests are queued up + <-queueReady + + resA := <-results + sentinelA1 := resA.ws + + // Should send to wallet A + msg := s.mockApi.getSentMessage(sentinelA1) + checkAddMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + // Complete wallet A message + s.mockApi.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message to wallet B + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockApi.getSentMessage(sentinelB1) + checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2)) +} + +// TestFundManagerWithdrawal verifies that as many withdraw operations as +// possible are processed +func TestFundManagerWithdrawalLimit(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Complete the "Reserve 10" message + s.mockApi.completeMsg(sentinelReserve10) + + // Release 10 + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Queue up withdraw requests + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + withdrawalReqTotal := 3 + withdrawalReqEnqueued := 0 + withdrawalReqQueue := make(chan func(), withdrawalReqTotal) + 
fa.onProcessStart(func() bool { + // If a new withdrawal request was enqueued + if len(fa.withdrawals) > withdrawalReqEnqueued { + withdrawalReqEnqueued++ + + // Pop the next request and run it + select { + case fn := <-withdrawalReqQueue: + go fn() + default: + } + } + // Once all the requests have arrived, we're ready to process the queue + if withdrawalReqEnqueued == withdrawalReqTotal { + close(queueReady) + return true + } + return false + }) + + type withdrawResult struct { + reqIndex int + ws cid.Cid + err error + } + withdrawRes := make(chan *withdrawResult) + + // Queue up three "Withdraw 5" requests + enqueuedCount := 0 + for i := 0; i < withdrawalReqTotal; i++ { + withdrawalReqQueue <- func() { + idx := enqueuedCount + enqueuedCount++ + + amt := abi.NewTokenAmount(5) + ws, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawRes <- &withdrawResult{reqIndex: idx, ws: ws, err: err} + } + } + // Start the first request + fn := <-withdrawalReqQueue + go fn() + + // All withdrawal requests are queued up and ready to be processed + <-queueReady + + // Organize results in request order + results := make([]*withdrawResult, withdrawalReqTotal) + for i := 0; i < 3; i++ { + res := <-withdrawRes + results[res.reqIndex] = res + } + + // Available 10 + // Withdraw 5 + // Expect Success + require.NoError(t, results[0].err) + // Available 5 + // Withdraw 5 + // Expect Success + require.NoError(t, results[1].err) + // Available 0 + // Withdraw 5 + // Expect FAIL + require.Error(t, results[2].err) + + // Expect withdrawal requests that fit under reserved amount to be combined + // into a single message on-chain + require.Equal(t, results[0].ws, results[1].ws) +} + +// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet +func TestFundManagerWithdrawByWallet(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + 
walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + + // Reserve 10 + reserveAmt := abi.NewTokenAmount(10) + sentinelReserve, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, reserveAmt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinelReserve) + + time.Sleep(10 * time.Millisecond) + + // Release 10 + err = s.fm.Release(s.acctAddr, reserveAmt) + require.NoError(t, err) + + type withdrawResult struct { + ws cid.Cid + err error + } + results := make(chan *withdrawResult) + + // Wait until withdrawals are queued up + walletAQueuedUp := make(chan struct{}) + queueReady := make(chan struct{}) + withdrawalCount := 0 + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == withdrawalCount { + return false + } + withdrawalCount = len(fa.withdrawals) + + if withdrawalCount == 1 { + close(walletAQueuedUp) + } else if withdrawalCount == 3 { + close(queueReady) + return true + } + return false + }) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Withdraw 1 + sentinelA1, err := s.fm.Withdraw(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &withdrawResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go func() { + // Wait until withdraw for wallet A is queued up + <-walletAQueuedUp + + // Wallet B: Withdraw 2 + go func() { + sentinelB1, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &withdrawResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Withdraw 3 + sentinelB2, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &withdrawResult{ + ws: sentinelB2, + err: err, + } + }() + + // Withdrawals are queued up + <-queueReady + + // Should withdraw from wallet A first + resA1 := <-results + sentinelA1 := resA1.ws + msg := s.mockApi.getSentMessage(sentinelA1) + checkWithdrawMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + 
// Complete wallet A message + s.mockApi.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message for wallet B from wallet A + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockApi.getSentMessage(sentinelB1) + checkWithdrawMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2)) +} + +// TestFundManagerRestart verifies that waiting for incomplete requests resumes +// on restart +func TestFundManagerRestart(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + acctAddr2 := tutils.NewActorAddr(t, "addr2") + + // Address 1: Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelAddr1, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg := s.mockApi.getSentMessage(sentinelAddr1) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + // Address 2: Reserve 7 + amt2 := abi.NewTokenAmount(7) + sentinelAddr2Res7, err := s.fm.Reserve(s.ctx, s.walletAddr, acctAddr2, amt2) + require.NoError(t, err) + + msg2 := s.mockApi.getSentMessage(sentinelAddr2Res7) + checkAddMessageFields(t, msg2, s.walletAddr, acctAddr2, amt2) + + // Complete "Address 1: Reserve 10" + s.mockApi.completeMsg(sentinelAddr1) + + // Give the completed state a moment to be stored before restart + time.Sleep(time.Millisecond * 10) + + // Restart + mockApiAfter := s.mockApi + fmAfter := newFundManager(mockApiAfter, s.ds) + err = fmAfter.Start() + require.NoError(t, err) + + amt3 := abi.NewTokenAmount(9) + reserveSentinel := make(chan cid.Cid) + go func() { + // Address 2: Reserve 9 + sentinel3, err := fmAfter.Reserve(s.ctx, s.walletAddr, acctAddr2, amt3) + require.NoError(t, err) + reserveSentinel <- sentinel3 + }() + + // Expect no message to be sent, because still waiting for 
previous + // message "Address 2: Reserve 7" to complete on-chain + select { + case <-reserveSentinel: + require.Fail(t, "Expected no message to be sent") + case <-time.After(10 * time.Millisecond): + } + + // Complete "Address 2: Reserve 7" + mockApiAfter.completeMsg(sentinelAddr2Res7) + + // Expect waiting message to now be sent + sentinel3 := <-reserveSentinel + msg3 := mockApiAfter.getSentMessage(sentinel3) + checkAddMessageFields(t, msg3, s.walletAddr, acctAddr2, amt3) +} + +// TestFundManagerReleaseAfterPublish verifies that release is successful in +// the following scenario: +// 1. Deal A adds 5 to addr1: reserved 0 -> 5 available 0 -> 5 +// 2. Deal B adds 7 to addr1: reserved 5 -> 12 available 5 -> 12 +// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5 +// 4. Deal A releases 5 from addr1: reserved 12 -> 7 available 5 +func TestFundManagerReleaseAfterPublish(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Deal A: Reserve 5 + // balance: 0 -> 5 + // reserved: 0 -> 5 + amt := abi.NewTokenAmount(5) + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinel) + + // Deal B: Reserve 7 + // balance: 5 -> 12 + // reserved: 5 -> 12 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinel) + + // Deal B: Publish (removes Deal B amount from balance) + // balance: 12 -> 5 + // reserved: 12 + amt = abi.NewTokenAmount(7) + s.mockApi.publish(s.acctAddr, amt) + + // Deal A: Release 5 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Deal B: Release 7 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) +} + +type scaffold struct { + ctx context.Context + ds *ds_sync.MutexDatastore + wllt 
*wallet.LocalWallet + walletAddr address.Address + acctAddr address.Address + mockApi *mockFundManagerAPI + fm *FundManager +} + +func setup(t *testing.T) *scaffold { + ctx := context.Background() + + wllt, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + walletAddr, err := wllt.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + acctAddr := tutils.NewActorAddr(t, "addr") + + mockApi := newMockFundManagerAPI(walletAddr) + dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) + fm := newFundManager(mockApi, dstore) + return &scaffold{ + ctx: ctx, + ds: dstore, + wllt: wllt, + walletAddr: walletAddr, + acctAddr: acctAddr, + mockApi: mockApi, + fm: fm, + } +} + +func checkAddMessageFields(t *testing.T, msg *types.Message, from address.Address, to address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, amt, msg.Value) + + var paramsTo address.Address + err := paramsTo.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, to, paramsTo) +} + +func checkWithdrawMessageFields(t *testing.T, msg *types.Message, from address.Address, addr address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, abi.NewTokenAmount(0), msg.Value) + + var params market.WithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, addr, params.ProviderOrClientAddress) + require.Equal(t, amt, params.Amount) +} + +type sentMsg struct { + msg *types.SignedMessage + ready chan struct{} +} + +type mockFundManagerAPI struct { + wallet address.Address + + lk sync.Mutex + escrow map[address.Address]abi.TokenAmount + sentMsgs map[cid.Cid]*sentMsg + completedMsgs map[cid.Cid]struct{} + waitingFor map[cid.Cid]chan struct{} +} + +func newMockFundManagerAPI(wallet 
address.Address) *mockFundManagerAPI { + return &mockFundManagerAPI{ + wallet: wallet, + escrow: make(map[address.Address]abi.TokenAmount), + sentMsgs: make(map[cid.Cid]*sentMsg), + completedMsgs: make(map[cid.Cid]struct{}), + waitingFor: make(map[cid.Cid]chan struct{}), + } +} + +func (mapi *mockFundManagerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + smsg := &types.SignedMessage{Message: *message} + mapi.sentMsgs[smsg.Cid()] = &sentMsg{msg: smsg, ready: make(chan struct{})} + + return smsg, nil +} + +func (mapi *mockFundManagerAPI) getSentMessage(c cid.Cid) *types.Message { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + for i := 0; i < 1000; i++ { + if pending, ok := mapi.sentMsgs[c]; ok { + return &pending.msg.Message + } + time.Sleep(time.Millisecond) + } + panic("expected message to be sent") +} + +func (mapi *mockFundManagerAPI) messageCount() int { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return len(mapi.sentMsgs) +} + +func (mapi *mockFundManagerAPI) completeMsg(msgCid cid.Cid) { + mapi.lk.Lock() + + pmsg, ok := mapi.sentMsgs[msgCid] + if ok { + if pmsg.msg.Message.Method == market.Methods.AddBalance { + var escrowAcct address.Address + err := escrowAcct.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = types.BigAdd(escrow, pmsg.msg.Message.Value) + mapi.escrow[escrowAcct] = escrow + log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow) + } else { + var params market.WithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + escrowAcct := params.ProviderOrClientAddress + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = types.BigSub(escrow, params.Amount) + mapi.escrow[escrowAcct] = escrow + log.Debugf("%s: escrow %d -> 
%d", escrowAcct, before, escrow) + } + } + + mapi.completedMsgs[msgCid] = struct{}{} + + ready, ok := mapi.waitingFor[msgCid] + + mapi.lk.Unlock() + + if ok { + close(ready) + } +} + +func (mapi *mockFundManagerAPI) StateMarketBalance(ctx context.Context, a address.Address, key types.TipSetKey) (api.MarketBalance, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return api.MarketBalance{ + Locked: abi.NewTokenAmount(0), + Escrow: mapi.getEscrow(a), + }, nil +} + +func (mapi *mockFundManagerAPI) getEscrow(a address.Address) abi.TokenAmount { + escrow := mapi.escrow[a] + if escrow.Nil() { + return abi.NewTokenAmount(0) + } + return escrow +} + +func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmount) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + escrow := mapi.escrow[addr] + if escrow.Nil() { + return + } + escrow = types.BigSub(escrow, amt) + if escrow.LessThan(abi.NewTokenAmount(0)) { + escrow = abi.NewTokenAmount(0) + } + mapi.escrow[addr] = escrow +} + +func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64) (*api.MsgLookup, error) { + res := &api.MsgLookup{ + Message: c, + Receipt: types.MessageReceipt{ + ExitCode: 0, + Return: nil, + }, + } + ready := make(chan struct{}) + + mapi.lk.Lock() + _, ok := mapi.completedMsgs[c] + if !ok { + mapi.waitingFor[c] = ready + } + mapi.lk.Unlock() + + if !ok { + select { + case <-ctx.Done(): + case <-ready: + } + } + return res, nil +} diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go deleted file mode 100644 index 50467a6e1..000000000 --- a/chain/market/fundmgr.go +++ /dev/null @@ -1,166 +0,0 @@ -package market - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - "go.uber.org/fx" - - "github.com/filecoin-project/lotus/api" - 
"github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" -) - -var log = logging.Logger("market_adapter") - -// API is the dependencies need to run a fund manager -type API struct { - fx.In - - full.ChainAPI - full.StateAPI - full.MpoolAPI -} - -// FundMgr monitors available balances and adds funds when EnsureAvailable is called -type FundMgr struct { - api fundMgrAPI - - lk sync.RWMutex - available map[address.Address]types.BigInt -} - -// StartFundManager creates a new fund manager and sets up event hooks to manage state changes -func StartFundManager(lc fx.Lifecycle, api API) *FundMgr { - fm := newFundMgr(&api) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - ev := events.NewEvents(ctx, &api) - preds := state.NewStatePredicates(&api) - dealDiffFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(fm.getAddresses))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiffFn(ctx, oldTs.Key(), newTs.Key()) - } - return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match) - }, - }) - return fm -} - -type fundMgrAPI interface { - StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) - MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func newFundMgr(api fundMgrAPI) *FundMgr { - return &FundMgr{ - api: api, - available: map[address.Address]types.BigInt{}, - } -} - -// checkFunc tells the events api to simply proceed (we always want to watch) -func (fm *FundMgr) checkFunc(ts 
*types.TipSet) (done bool, more bool, err error) { - return false, true, nil -} - -// revert handles reverts to balances -func (fm *FundMgr) revert(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? - log.Warn("balance change reverted; TODO: actually handle this!") - return nil -} - -// stateChanged handles balance changes monitored on the chain from one tipset to the next -func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { - changedBalances, ok := states.(state.ChangedBalances) - if !ok { - panic("Expected state.ChangedBalances") - } - // overwrite our in memory cache with new values from chain (chain is canonical) - fm.lk.Lock() - for addr, balanceChange := range changedBalances { - if fm.available[addr].Int != nil { - log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String()) - } - - fm.available[addr] = balanceChange.To - } - fm.lk.Unlock() - return true, nil -} - -func (fm *FundMgr) getAddresses() []address.Address { - fm.lk.RLock() - defer fm.lk.RUnlock() - addrs := make([]address.Address, 0, len(fm.available)) - for addr := range fm.available { - addrs = append(addrs, addr) - } - return addrs -} - -// EnsureAvailable looks at the available balance in escrow for a given -// address, and if less than the passed in amount, adds the difference -func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - idAddr, err := fm.api.StateLookupID(ctx, addr, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - fm.lk.Lock() - defer fm.lk.Unlock() - - bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - - stateAvail := types.BigSub(bal.Escrow, bal.Locked) - - avail, ok := fm.available[idAddr] - if !ok { - avail = stateAvail - } - - toAdd := types.BigSub(amt, 
avail) - if toAdd.LessThan(types.NewInt(0)) { - toAdd = types.NewInt(0) - } - fm.available[idAddr] = big.Add(avail, toAdd) - - log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", avail.String(), stateAvail.String(), amt.String(), toAdd.String()) - - if toAdd.LessThanEqual(big.Zero()) { - return cid.Undef, nil - } - - params, err := actors.SerializeParams(&addr) - if err != nil { - fm.available[idAddr] = avail - return cid.Undef, err - } - - smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{ - To: market.Address, - From: wallet, - Value: toAdd, - Method: market.Methods.AddBalance, - Params: params, - }, nil) - if err != nil { - fm.available[idAddr] = avail - return cid.Undef, err - } - - return smsg.Cid(), nil -} diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go deleted file mode 100644 index 88ca2e16f..000000000 --- a/chain/market/fundmgr_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package market - -import ( - "context" - "errors" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" -) - -type fakeAPI struct { - returnedBalance api.MarketBalance - returnedBalanceErr error - signature crypto.Signature - receivedMessage *types.Message - pushMessageErr error - lookupIDErr error -} - -func (fapi *fakeAPI) StateLookupID(_ context.Context, addr address.Address, _ types.TipSetKey) (address.Address, error) { - return addr, fapi.lookupIDErr -} -func (fapi *fakeAPI) StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, 
error) { - return fapi.returnedBalance, fapi.returnedBalanceErr -} - -func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { - fapi.receivedMessage = msg - return &types.SignedMessage{ - Message: *msg, - Signature: fapi.signature, - }, fapi.pushMessageErr -} - -func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message { - params, _ := actors.SerializeParams(&addr) - return &types.Message{ - To: market.Address, - From: wallet, - Value: toAdd, - Method: market.Methods.AddBalance, - Params: params, - } -} - -type expectedResult struct { - addAmt abi.TokenAmount - shouldAdd bool - err error - cachedAvailable abi.TokenAmount -} - -func TestAddFunds(t *testing.T) { - ctx := context.Background() - testCases := map[string]struct { - returnedBalanceErr error - returnedBalance api.MarketBalance - addAmounts []abi.TokenAmount - pushMessageErr error - expectedResults []expectedResult - lookupIDErr error - }{ - "succeeds, trivial case": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - addAmt: abi.NewTokenAmount(100), - shouldAdd: true, - err: nil, - }, - }, - }, - "succeeds, money already present": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - shouldAdd: false, - err: nil, - cachedAvailable: abi.NewTokenAmount(100), - }, - }, - }, - "succeeds, multiple adds": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(200), abi.NewTokenAmount(250), abi.NewTokenAmount(250)}, - expectedResults: []expectedResult{ - { - shouldAdd: 
false, - err: nil, - }, - { - addAmt: abi.NewTokenAmount(100), - shouldAdd: true, - err: nil, - cachedAvailable: abi.NewTokenAmount(200), - }, - { - addAmt: abi.NewTokenAmount(50), - shouldAdd: true, - err: nil, - cachedAvailable: abi.NewTokenAmount(250), - }, - { - shouldAdd: false, - err: nil, - cachedAvailable: abi.NewTokenAmount(250), - }, - }, - }, - "error on market balance": { - returnedBalanceErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - }, - }, - }, - "error on push message": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, - pushMessageErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - cachedAvailable: abi.NewTokenAmount(0), - }, - }, - }, - "error looking up address": { - lookupIDErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - }, - }, - }, - } - - for testCase, data := range testCases { - //nolint:scopelint - t.Run(testCase, func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - sig := make([]byte, 100) - _, err := rand.Read(sig) - require.NoError(t, err) - fapi := &fakeAPI{ - returnedBalance: data.returnedBalance, - returnedBalanceErr: data.returnedBalanceErr, - signature: crypto.Signature{ - Type: crypto.SigTypeUnknown, - Data: sig, - }, - pushMessageErr: data.pushMessageErr, - lookupIDErr: data.lookupIDErr, - } - fundMgr := newFundMgr(fapi) - addr := tutils.NewIDAddr(t, uint64(rand.Uint32())) - wallet := tutils.NewIDAddr(t, uint64(rand.Uint32())) - for i, amount := range data.addAmounts { - fapi.receivedMessage = nil - _, err := 
fundMgr.EnsureAvailable(ctx, addr, wallet, amount) - expected := data.expectedResults[i] - if expected.err == nil { - require.NoError(t, err) - if expected.shouldAdd { - expectedMessage := addFundsMsg(expected.addAmt, addr, wallet) - require.Equal(t, expectedMessage, fapi.receivedMessage) - } else { - require.Nil(t, fapi.receivedMessage) - } - } else { - require.EqualError(t, err, expected.err.Error()) - } - - if !expected.cachedAvailable.Nil() { - require.Equal(t, expected.cachedAvailable, fundMgr.available[addr]) - } - } - }) - } -} diff --git a/chain/market/store.go b/chain/market/store.go new file mode 100644 index 000000000..e0d0e10be --- /dev/null +++ b/chain/market/store.go @@ -0,0 +1,90 @@ +package market + +import ( + "bytes" + + cborrpc "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +const dsKeyAddr = "Addr" + +type Store struct { + ds datastore.Batching +} + +func newStore(ds dtypes.MetadataDS) *Store { + ds = namespace.Wrap(ds, datastore.NewKey("/fundmgr/")) + return &Store{ + ds: ds, + } +} + +// save the state to the datastore +func (ps *Store) save(state *FundedAddressState) error { + k := dskeyForAddr(state.Addr) + + b, err := cborrpc.Dump(state) + if err != nil { + return err + } + + return ps.ds.Put(k, b) +} + +// get the state for the given address +func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { + k := dskeyForAddr(addr) + + data, err := ps.ds.Get(k) + if err != nil { + return nil, err + } + + var state FundedAddressState + err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state) + if err != nil { + return nil, err + } + return &state, nil +} + +// forEach calls iter with each address in the datastore +func (ps *Store) forEach(iter func(*FundedAddressState)) error { + res, err := 
ps.ds.Query(dsq.Query{Prefix: dsKeyAddr}) + if err != nil { + return err + } + defer res.Close() //nolint:errcheck + + for { + res, ok := res.NextSync() + if !ok { + break + } + + if res.Error != nil { + return err + } + + var stored FundedAddressState + if err := stored.UnmarshalCBOR(bytes.NewReader(res.Value)); err != nil { + return err + } + + iter(&stored) + } + + return nil +} + +// The datastore key used to identify the address state +func dskeyForAddr(addr address.Address) datastore.Key { + return datastore.KeyWithNamespaces([]string{dsKeyAddr, addr.String()}) +} diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 8c8a8af15..a507b60cf 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -264,7 +264,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted } if strict && nonceGap { - log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", + log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", m.Message.From, m.Message.Nonce, nextNonce) } @@ -465,7 +465,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T epoch := curTs.Height() minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) - if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil { + if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil { return false, xerrors.Errorf("message will not be included in a block: %w", err) } @@ -546,7 +546,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { } // Perform syntactic validation, minGas=0 as we check the actual mingas before we add it - if err := m.Message.ValidForBlockInclusion(0); err != nil { + if err := m.Message.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil { return xerrors.Errorf("message not valid for block inclusion: %w", err) } @@ -1219,7 +1219,7 @@ func (mp 
*MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.Si if smsg != nil { out = append(out, smsg) } else { - log.Warnf("could not recover signature for bls message %s", msg.Cid()) + log.Debugf("could not recover signature for bls message %s", msg.Cid()) } } } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 347e90044..5a6c751bc 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -2,6 +2,7 @@ package messagepool import ( "context" + "time" "github.com/ipfs/go-cid" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -9,9 +10,16 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) +var ( + HeadChangeCoalesceMinDelay = 2 * time.Second + HeadChangeCoalesceMaxDelay = 6 * time.Second + HeadChangeCoalesceMergeInterval = time.Second +) + type Provider interface { SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet PutMessage(m types.ChainMsg) (cid.Cid, error) @@ -34,7 +42,13 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { } func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { - mpp.sm.ChainStore().SubscribeHeadChanges(cb) + mpp.sm.ChainStore().SubscribeHeadChanges( + store.WrapHeadChangeCoalescer( + cb, + HeadChangeCoalesceMinDelay, + HeadChangeCoalesceMaxDelay, + HeadChangeCoalesceMergeInterval, + )) return mpp.sm.ChainStore().GetHeaviestTipSet() } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 5a8200bf8..8f33a6364 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -21,6 +21,8 @@ import ( var bigBlockGasLimit = big.NewInt(build.BlockGasLimit) +var MaxBlockMessages = 16000 + // this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go // away in the next 
fork. func allowNegativeChains(epoch abi.ChainEpoch) bool { @@ -43,7 +45,7 @@ type msgChain struct { prev *msgChain } -func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -54,10 +56,20 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.Si // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance if tq > 0.84 { - return mp.selectMessagesGreedy(mp.curTs, ts) + msgs, err = mp.selectMessagesGreedy(mp.curTs, ts) + } else { + msgs, err = mp.selectMessagesOptimal(mp.curTs, ts, tq) } - return mp.selectMessagesOptimal(mp.curTs, ts, tq) + if err != nil { + return nil, err + } + + if len(msgs) > MaxBlockMessages { + msgs = msgs[:MaxBlockMessages] + } + + return msgs, nil } func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index e089a1084..97c4ce401 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -6,6 +6,10 @@ import ( "encoding/binary" "math" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" + + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/go-address" @@ -23,7 +27,6 @@ import ( adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/migration/nv3" - m2 "github.com/filecoin-project/specs-actors/v2/actors/migration" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -90,6 +93,14 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Height: build.UpgradeKumquatHeight, Network: network.Version6, 
Migration: nil, + }, { + Height: build.UpgradeCalicoHeight, + Network: network.Version7, + Migration: UpgradeCalico, + }, { + Height: build.UpgradePersianHeight, + Network: network.Version8, + Migration: nil, }} if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade @@ -601,7 +612,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err) } - newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig()) + newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig()) if err != nil { return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) } @@ -652,6 +663,48 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root return tree.Flush(ctx) } +func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + store := sm.cs.Store(ctx) + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for calico upgrade, got %d", + stateRoot.Version, + ) + } + + newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig()) + if err != nil { + return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err) + } + + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: stateRoot.Version, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // perform some basic sanity checks to make sure everything still works. 
+ if newSm, err := state.LoadStateTree(store, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) + } else if newRoot2 != newRoot { + return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { + return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) + } + + return newRoot, nil +} + func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { ia, err := tree.GetActor(builtin0.InitActorAddr) if err != nil { diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 7e5809a84..10c71d8dc 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -72,13 +72,17 @@ type StateManager struct { // ErrExpensiveFork. expensiveUpgrades map[abi.ChainEpoch]struct{} - stCache map[string][]cid.Cid - compWait map[string]chan struct{} - stlk sync.Mutex - genesisMsigLk sync.Mutex - newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) - preIgnitionGenInfos *genesisInfo - postIgnitionGenInfos *genesisInfo + stCache map[string][]cid.Cid + compWait map[string]chan struct{} + stlk sync.Mutex + genesisMsigLk sync.Mutex + newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) + preIgnitionVesting []msig0.State + postIgnitionVesting []msig0.State + postCalicoVesting []msig0.State + + genesisPledge abi.TokenAmount + genesisMarketFunds abi.TokenAmount } func NewStateManager(cs *store.ChainStore) *StateManager { @@ -889,23 +893,8 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) ( sm.newVM = nvm } -type genesisInfo struct { - genesisMsigs []msig0.State - // info about the Accounts in the genesis state - genesisActors []genesisActor - genesisPledge abi.TokenAmount - genesisMarketFunds abi.TokenAmount 
-} - -type genesisActor struct { - addr address.Address - initBal abi.TokenAmount -} - -// sets up information about the actors in the genesis state -func (sm *StateManager) setupGenesisActors(ctx context.Context) error { - - gi := genesisInfo{} +// sets up information about the vesting schedule +func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { gb, err := sm.cs.GetGenesis() if err != nil { @@ -928,127 +917,18 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error { return xerrors.Errorf("loading state tree: %w", err) } - gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + gmf, err := getFilMarketLocked(ctx, sTree) if err != nil { return xerrors.Errorf("setting up genesis market funds: %w", err) } - gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) + gp, err := getFilPowerLocked(ctx, sTree) if err != nil { return xerrors.Errorf("setting up genesis pledge: %w", err) } - totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) - err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error { - if builtin.IsMultisigActor(act.Code) { - s, err := multisig.Load(sm.cs.Store(ctx), act) - if err != nil { - return err - } - - se, err := s.StartEpoch() - if err != nil { - return err - } - - if se != 0 { - return xerrors.New("genesis multisig doesn't start vesting at epoch 0!") - } - - ud, err := s.UnlockDuration() - if err != nil { - return err - } - - ib, err := s.InitialBalance() - if err != nil { - return err - } - - ot, f := totalsByEpoch[ud] - if f { - totalsByEpoch[ud] = big.Add(ot, ib) - } else { - totalsByEpoch[ud] = ib - } - - } else if builtin.IsAccountActor(act.Code) { - // should exclude burnt funds actor and "remainder account actor" - // should only ever be "faucet" accounts in testnets - if kaddr == builtin.BurntFundsActorAddr { - return nil - } - - kid, err := sTree.LookupID(kaddr) - if err != nil { - return xerrors.Errorf("resolving address: %w", err) - } - - gi.genesisActors = 
append(gi.genesisActors, genesisActor{ - addr: kid, - initBal: act.Balance, - }) - } - return nil - }) - - if err != nil { - return xerrors.Errorf("error setting up genesis infos: %w", err) - } - - // TODO: use network upgrade abstractions or always start at actors v0? - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) - for k, v := range totalsByEpoch { - ns := msig0.State{ - InitialBalance: v, - UnlockDuration: k, - PendingTxns: cid.Undef, - } - gi.genesisMsigs = append(gi.genesisMsigs, ns) - } - - sm.preIgnitionGenInfos = &gi - - return nil -} - -// sets up information about the actors in the genesis state -// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs -// We also do not consider ANY account actors (including the faucet) -func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error { - - gi := genesisInfo{} - - gb, err := sm.cs.GetGenesis() - if err != nil { - return xerrors.Errorf("getting genesis block: %w", err) - } - - gts, err := types.NewTipSet([]*types.BlockHeader{gb}) - if err != nil { - return xerrors.Errorf("getting genesis tipset: %w", err) - } - - st, _, err := sm.TipSetState(ctx, gts) - if err != nil { - return xerrors.Errorf("getting genesis tipset state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - sTree, err := state.LoadStateTree(cst, st) - if err != nil { - return xerrors.Errorf("loading state tree: %w", err) - } - - gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis market funds: %w", err) - } - - gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis pledge: %w", err) - } + sm.genesisMarketFunds = gmf + sm.genesisPledge = gp totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) @@ -1074,58 +954,21 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context 
totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + sm.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ InitialBalance: v, UnlockDuration: k, PendingTxns: cid.Undef, } - gi.genesisMsigs = append(gi.genesisMsigs, ns) + sm.preIgnitionVesting = append(sm.preIgnitionVesting, ns) } - sm.preIgnitionGenInfos = &gi - return nil } -// sets up information about the actors in the genesis state, post the ignition fork -func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error { - - gi := genesisInfo{} - - gb, err := sm.cs.GetGenesis() - if err != nil { - return xerrors.Errorf("getting genesis block: %w", err) - } - - gts, err := types.NewTipSet([]*types.BlockHeader{gb}) - if err != nil { - return xerrors.Errorf("getting genesis tipset: %w", err) - } - - st, _, err := sm.TipSetState(ctx, gts) - if err != nil { - return xerrors.Errorf("getting genesis tipset state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - sTree, err := state.LoadStateTree(cst, st) - if err != nil { - return xerrors.Errorf("loading state tree: %w", err) - } - - // Unnecessary, should be removed - gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis market funds: %w", err) - } - - // Unnecessary, should be removed - gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis pledge: %w", err) - } +// sets up information about the vesting schedule post the ignition upgrade +func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error { totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) @@ -1151,7 +994,7 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro totalsByEpoch[sixYears] = 
big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error @@ -1161,10 +1004,56 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. StartEpoch: build.UpgradeLiftoffHeight, } - gi.genesisMsigs = append(gi.genesisMsigs, ns) + sm.postIgnitionVesting = append(sm.postIgnitionVesting, ns) } - sm.postIgnitionGenInfos = &gi + return nil +} + +// sets up information about the vesting schedule post the calico upgrade +func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error { + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 0 days + zeroDays := abi.ChainEpoch(0) + totalsByEpoch[zeroDays] = big.NewInt(10_632_000) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(19_015_887) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000)) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958)) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) + totalsByEpoch[sixYears] = 
big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053)) + + sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))), + UnlockDuration: k, + PendingTxns: cid.Undef, + StartEpoch: build.UpgradeLiftoffHeight, + } + sm.postCalicoVesting = append(sm.postCalicoVesting, ns) + } return nil } @@ -1175,12 +1064,19 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { vf := big.Zero() if height <= build.UpgradeIgnitionHeight { - for _, v := range sm.preIgnitionGenInfos.genesisMsigs { + for _, v := range sm.preIgnitionVesting { au := big.Sub(v.InitialBalance, v.AmountLocked(height)) vf = big.Add(vf, au) } + } else if height <= build.UpgradeCalicoHeight { + for _, v := range sm.postIgnitionVesting { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. + // The start epoch changed in the Ignition upgrade. + au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) + } } else { - for _, v := range sm.postIgnitionGenInfos.genesisMsigs { + for _, v := range sm.postCalicoVesting { // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. // The start epoch changed in the Ignition upgrade. au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) @@ -1188,26 +1084,12 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, } } - // there should not be any such accounts in testnet (and also none in mainnet?) 
- // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - for _, v := range sm.preIgnitionGenInfos.genesisActors { - act, err := st.GetActor(v.addr) - if err != nil { - return big.Zero(), xerrors.Errorf("failed to get actor: %w", err) - } - - diff := big.Sub(v.initBal, act.Balance) - if diff.GreaterThan(big.Zero()) { - vf = big.Add(vf, diff) - } - } - // After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed if height <= build.UpgradeActorsV2Height { // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge) + vf = big.Add(vf, sm.genesisPledge) // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds) + vf = big.Add(vf, sm.genesisMarketFunds) } return vf, nil @@ -1301,16 +1183,22 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { sm.genesisMsigLk.Lock() defer sm.genesisMsigLk.Unlock() - if sm.preIgnitionGenInfos == nil { - err := sm.setupPreIgnitionGenesisActorsTestnet(ctx) + if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() { + err := sm.setupGenesisVestingSchedule(ctx) if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err) + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err) } } - if sm.postIgnitionGenInfos == nil { - err := sm.setupPostIgnitionGenesisActors(ctx) + if sm.postIgnitionVesting == nil { + err := sm.setupPostIgnitionVesting(ctx) if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err) + return api.CirculatingSupply{}, 
xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err) + } + } + if sm.postCalicoVesting == nil { + err := sm.setupPostCalicoVesting(ctx) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err) } } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 78121cc4c..fb0b91378 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -158,7 +158,7 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, return mas.LoadSectors(snos) } -func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { +func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { act, err := sm.LoadActorRaw(ctx, maddr, st) if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) @@ -169,21 +169,27 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } - // TODO (!!): Actor Update: Make this active sectors + var provingSectors bitfield.BitField + if nv < network.Version7 { + allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) + if err != nil { + return nil, xerrors.Errorf("get all sectors: %w", err) + } - allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) - if err != nil { - return nil, xerrors.Errorf("get all sectors: %w", err) - } + faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) + if err != nil { + return nil, xerrors.Errorf("get faulty sectors: %w", err) + } - faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) - if err != nil { - return nil, xerrors.Errorf("get faulty sectors: 
%w", err) - } - - provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faaults, change to just ActiveSectors in an upgrade - if err != nil { - return nil, xerrors.Errorf("calc proving sectors: %w", err) + provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors) + if err != nil { + return nil, xerrors.Errorf("calc proving sectors: %w", err) + } + } else { + provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors) + if err != nil { + return nil, xerrors.Errorf("get active sectors: %w", err) + } } numProvSect, err := provingSectors.Count() @@ -201,12 +207,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S return nil, xerrors.Errorf("getting miner info: %w", err) } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize) - if err != nil { - return nil, xerrors.Errorf("getting seal proof type: %w", err) - } - - wpt, err := spt.RegisteredWinningPoStProof() + wpt, err := info.SealProofType.RegisteredWinningPoStProof() if err != nil { return nil, xerrors.Errorf("getting window proof type: %w", err) } @@ -246,7 +247,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S out := make([]builtin.SectorInfo, len(sectors)) for i, sinfo := range sectors { out[i] = builtin.SectorInfo{ - SealProof: spt, + SealProof: sinfo.SealProof, SectorNumber: sinfo.SectorNumber, SealedCID: sinfo.SealedCID, } @@ -497,7 +498,9 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) } - sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand) + nv := sm.GetNtwkVersion(ctx, ts.Height()) + + sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand) if err != nil { return nil, xerrors.Errorf("getting winning post proving set: %w", err) } diff --git 
a/chain/store/coalescer.go b/chain/store/coalescer.go new file mode 100644 index 000000000..443359c8a --- /dev/null +++ b/chain/store/coalescer.go @@ -0,0 +1,214 @@ +package store + +import ( + "context" + "time" + + "github.com/filecoin-project/lotus/chain/types" +) + +// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer. +// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will +// wait for that long to coalesce more head changes. +// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change +// more than that. +// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was +// within the merge interval when the coalesce timer fires, then the coalesce time is extended +// by min delay and up to max delay total. +func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee { + c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval) + return c.HeadChange +} + +// HeadChangeCoalescer is a stateful reorg notifee which coalesces incoming head changes +// with pending head changes to reduce state computations from head change notifications. +type HeadChangeCoalescer struct { + notify ReorgNotifee + + ctx context.Context + cancel func() + + eventq chan headChange + + revert []*types.TipSet + apply []*types.TipSet +} + +type headChange struct { + revert, apply []*types.TipSet +} + +// NewHeadChangeCoalescer creates a HeadChangeCoalescer. 
+func NewHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) *HeadChangeCoalescer { + ctx, cancel := context.WithCancel(context.Background()) + c := &HeadChangeCoalescer{ + notify: fn, + ctx: ctx, + cancel: cancel, + eventq: make(chan headChange), + } + + go c.background(minDelay, maxDelay, mergeInterval) + + return c +} + +// HeadChange is the ReorgNotifee callback for the stateful coalescer; it receives an incoming +// head change and schedules dispatch of a coalesced head change in the background. +func (c *HeadChangeCoalescer) HeadChange(revert, apply []*types.TipSet) error { + select { + case c.eventq <- headChange{revert: revert, apply: apply}: + return nil + case <-c.ctx.Done(): + return c.ctx.Err() + } +} + +// Close closes the coalescer and cancels the background dispatch goroutine. +// Any further notification will result in an error. +func (c *HeadChangeCoalescer) Close() error { + select { + case <-c.ctx.Done(): + default: + c.cancel() + } + + return nil +} + +// Implementation details + +func (c *HeadChangeCoalescer) background(minDelay, maxDelay, mergeInterval time.Duration) { + var timerC <-chan time.Time + var first, last time.Time + + for { + select { + case evt := <-c.eventq: + c.coalesce(evt.revert, evt.apply) + + now := time.Now() + last = now + if first.IsZero() { + first = now + } + + if timerC == nil { + timerC = time.After(minDelay) + } + + case now := <-timerC: + sinceFirst := now.Sub(first) + sinceLast := now.Sub(last) + + if sinceLast < mergeInterval && sinceFirst < maxDelay { + // coalesce some more + maxWait := maxDelay - sinceFirst + wait := minDelay + if maxWait < wait { + wait = maxWait + } + + timerC = time.After(wait) + } else { + // dispatch + c.dispatch() + + first = time.Time{} + last = time.Time{} + timerC = nil + } + + case <-c.ctx.Done(): + if c.revert != nil || c.apply != nil { + c.dispatch() + } + return + } + } +} + +func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) { + 
// newly reverted tipsets cancel out with pending applys. + // similarly, newly applied tipsets cancel out with pending reverts. + + // pending tipsets + pendRevert := make(map[types.TipSetKey]struct{}, len(c.revert)) + for _, ts := range c.revert { + pendRevert[ts.Key()] = struct{}{} + } + + pendApply := make(map[types.TipSetKey]struct{}, len(c.apply)) + for _, ts := range c.apply { + pendApply[ts.Key()] = struct{}{} + } + + // incoming tipsets + reverting := make(map[types.TipSetKey]struct{}, len(revert)) + for _, ts := range revert { + reverting[ts.Key()] = struct{}{} + } + + applying := make(map[types.TipSetKey]struct{}, len(apply)) + for _, ts := range apply { + applying[ts.Key()] = struct{}{} + } + + // coalesced revert set + // - pending reverts are cancelled by incoming applys + // - incoming reverts are cancelled by pending applys + newRevert := make([]*types.TipSet, 0, len(c.revert)+len(revert)) + for _, ts := range c.revert { + _, cancel := applying[ts.Key()] + if cancel { + continue + } + + newRevert = append(newRevert, ts) + } + + for _, ts := range revert { + _, cancel := pendApply[ts.Key()] + if cancel { + continue + } + + newRevert = append(newRevert, ts) + } + + // coalesced apply set + // - pending applys are cancelled by incoming reverts + // - incoming applys are cancelled by pending reverts + newApply := make([]*types.TipSet, 0, len(c.apply)+len(apply)) + for _, ts := range c.apply { + _, cancel := reverting[ts.Key()] + if cancel { + continue + } + + newApply = append(newApply, ts) + } + + for _, ts := range apply { + _, cancel := pendRevert[ts.Key()] + if cancel { + continue + } + + newApply = append(newApply, ts) + } + + // commit the coalesced sets + c.revert = newRevert + c.apply = newApply +} + +func (c *HeadChangeCoalescer) dispatch() { + err := c.notify(c.revert, c.apply) + if err != nil { + log.Errorf("error dispatching coalesced head change notification: %s", err) + } + + c.revert = nil + c.apply = nil +} diff --git 
a/chain/store/coalescer_test.go b/chain/store/coalescer_test.go new file mode 100644 index 000000000..d46285108 --- /dev/null +++ b/chain/store/coalescer_test.go @@ -0,0 +1,72 @@ +package store + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" +) + +func TestHeadChangeCoalescer(t *testing.T) { + notif := make(chan headChange, 1) + c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error { + notif <- headChange{apply: apply, revert: revert} + return nil + }, + 100*time.Millisecond, + 200*time.Millisecond, + 10*time.Millisecond, + ) + defer c.Close() //nolint + + b0 := mock.MkBlock(nil, 0, 0) + root := mock.TipSet(b0) + bA := mock.MkBlock(root, 1, 1) + tA := mock.TipSet(bA) + bB := mock.MkBlock(root, 1, 2) + tB := mock.TipSet(bB) + tAB := mock.TipSet(bA, bB) + bC := mock.MkBlock(root, 1, 3) + tABC := mock.TipSet(bA, bB, bC) + bD := mock.MkBlock(root, 1, 4) + tABCD := mock.TipSet(bA, bB, bC, bD) + bE := mock.MkBlock(root, 1, 5) + tABCDE := mock.TipSet(bA, bB, bC, bD, bE) + + c.HeadChange(nil, []*types.TipSet{tA}) //nolint + c.HeadChange(nil, []*types.TipSet{tB}) //nolint + c.HeadChange([]*types.TipSet{tA, tB}, []*types.TipSet{tAB}) //nolint + c.HeadChange([]*types.TipSet{tAB}, []*types.TipSet{tABC}) //nolint + + change := <-notif + + if len(change.revert) != 0 { + t.Fatalf("expected empty revert set but got %d elements", len(change.revert)) + } + if len(change.apply) != 1 { + t.Fatalf("expected single element apply set but got %d elements", len(change.apply)) + } + if change.apply[0] != tABC { + t.Fatalf("expected to apply tABC") + } + + c.HeadChange([]*types.TipSet{tABC}, []*types.TipSet{tABCD}) //nolint + c.HeadChange([]*types.TipSet{tABCD}, []*types.TipSet{tABCDE}) //nolint + + change = <-notif + + if len(change.revert) != 1 { + t.Fatalf("expected single element revert set but got %d elements", len(change.revert)) + } + if change.revert[0] != tABC { + 
t.Fatalf("expected to revert tABC") + } + if len(change.apply) != 1 { + t.Fatalf("expected single element apply set but got %d elements", len(change.apply)) + } + if change.apply[0] != tABCDE { + t.Fatalf("expected to apply tABCDE") + } + +} diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 5283d10dc..89756a252 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -31,7 +31,8 @@ func TestIndexSeeks(t *testing.T) { ctx := context.TODO() nbs := blockstore.NewTemporarySync() - cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil) + cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil) + defer cs.Close() //nolint:errcheck _, err = cs.Import(bytes.NewReader(gencar)) if err != nil { diff --git a/chain/store/store.go b/chain/store/store.go index 00a78500e..f4ce8112b 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "encoding/json" + "errors" "io" "os" "strconv" @@ -44,10 +45,10 @@ import ( "github.com/ipfs/go-datastore/query" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" - car "github.com/ipld/go-car" + "github.com/ipld/go-car" carutil "github.com/ipld/go-car/util" cbg "github.com/whyrusleeping/cbor-gen" - pubsub "github.com/whyrusleeping/pubsub" + "github.com/whyrusleeping/pubsub" "golang.org/x/xerrors" ) @@ -59,6 +60,8 @@ var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") var DefaultTipSetCacheSize = 8192 var DefaultMsgMetaCacheSize = 2048 +var ErrNotifeeDone = errors.New("notifee is done and should be removed") + func init() { if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" { tscs, err := strconv.Atoi(s) @@ -104,8 +107,11 @@ type HeadChangeEvt struct { // 1. a tipset cache // 2. a block => messages references cache. 
type ChainStore struct { - bs bstore.Blockstore - ds dstore.Batching + bs bstore.Blockstore + localbs bstore.Blockstore + ds dstore.Batching + + localviewer bstore.Viewer heaviestLk sync.Mutex heaviest *types.TipSet @@ -128,25 +134,37 @@ type ChainStore struct { evtTypes [1]journal.EventType journal journal.Journal + + cancelFn context.CancelFunc + wg sync.WaitGroup } -func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { - c, _ := lru.NewARC(DefaultMsgMetaCacheSize) - tsc, _ := lru.NewARC(DefaultTipSetCacheSize) +// localbs is guaranteed to fail Get* if requested block isn't stored locally +func NewChainStore(bs bstore.Blockstore, localbs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { + mmCache, _ := lru.NewARC(DefaultMsgMetaCacheSize) + tsCache, _ := lru.NewARC(DefaultTipSetCacheSize) if j == nil { j = journal.NilJournal() } + + ctx, cancel := context.WithCancel(context.Background()) cs := &ChainStore{ bs: bs, + localbs: localbs, ds: ds, bestTips: pubsub.New(64), tipsets: make(map[abi.ChainEpoch][]cid.Cid), - mmCache: c, - tsCache: tsc, + mmCache: mmCache, + tsCache: tsCache, vmcalls: vmcalls, + cancelFn: cancel, journal: j, } + if v, ok := localbs.(bstore.Viewer); ok { + cs.localviewer = v + } + cs.evtTypes = [1]journal.EventType{ evtTypeHeadChange: j.RegisterEventType("sync", "head_change"), } @@ -179,19 +197,24 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB } hcmetric := func(rev, app []*types.TipSet) error { - ctx := context.Background() for _, r := range app { - stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height()))) + stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height()))) } return nil } cs.reorgNotifeeCh = make(chan ReorgNotifee) - cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric}) + cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, 
hcmetric}) return cs } +func (cs *ChainStore) Close() error { + cs.cancelFn() + cs.wg.Wait() + return nil +} + func (cs *ChainStore) Load() error { head, err := cs.ds.Get(chainHeadKey) if err == dstore.ErrNotFound { @@ -259,7 +282,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange log.Warn("chain head sub exit loop") return } - if len(out) > 0 { + if len(out) > 5 { log.Warnf("head change sub is slow, has %d buffered entries", len(out)) } select { @@ -358,10 +381,32 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. return cs.takeHeaviestTipSet(ctx, ts) + } else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) { + log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts) } return nil } +// ForceHeadSilent forces a chain head tipset without triggering a reorg +// operation. +// +// CAUTION: Use it only for testing, such as to teleport the chain to a +// particular tipset to carry out a benchmark, verification, etc. on a chain +// segment. +func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error { + log.Warnf("(!!!) 
forcing a new head silently; new head: %s", ts) + + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + cs.heaviest = ts + + err := cs.writeHead(ts) + if err != nil { + err = xerrors.Errorf("failed to write chain head: %s", err) + } + return err +} + type reorg struct { old *types.TipSet new *types.TipSet @@ -372,7 +417,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo notifees := make([]ReorgNotifee, len(initialNotifees)) copy(notifees, initialNotifees) + cs.wg.Add(1) go func() { + defer cs.wg.Done() defer log.Warn("reorgWorker quit") for { @@ -404,11 +451,36 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo apply[i], apply[opp] = apply[opp], apply[i] } - for _, hcf := range notifees { - if err := hcf(revert, apply); err != nil { + var toremove map[int]struct{} + for i, hcf := range notifees { + err := hcf(revert, apply) + + switch err { + case nil: + + case ErrNotifeeDone: + if toremove == nil { + toremove = make(map[int]struct{}) + } + toremove[i] = struct{}{} + + default: log.Error("head change func errored (BAD): ", err) } } + + if len(toremove) > 0 { + newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove)) + for i, hcf := range notifees { + _, remove := toremove[i] + if remove { + continue + } + newNotifees = append(newNotifees, hcf) + } + notifees = newNotifees + } + case <-ctx.Done(): return } @@ -522,12 +594,20 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. 
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { - sb, err := cs.bs.Get(c) - if err != nil { - return nil, err + if cs.localviewer == nil { + sb, err := cs.localbs.Get(c) + if err != nil { + return nil, err + } + return types.DecodeBlock(sb.RawData()) } - return types.DecodeBlock(sb.RawData()) + var blk *types.BlockHeader + err := cs.localviewer.View(c, func(b []byte) (err error) { + blk, err = types.DecodeBlock(b) + return err + }) + return blk, err } func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { @@ -772,12 +852,7 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return nil, err } - genb, err := cs.bs.Get(c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(genb.RawData()) + return cs.GetBlock(c) } func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { @@ -793,23 +868,39 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { } func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err + if cs.localviewer == nil { + sb, err := cs.localbs.Get(c) + if err != nil { + log.Errorf("get message get failed: %s: %s", c, err) + return nil, err + } + return types.DecodeMessage(sb.RawData()) } - return types.DecodeMessage(sb.RawData()) + var msg *types.Message + err := cs.localviewer.View(c, func(b []byte) (err error) { + msg, err = types.DecodeMessage(b) + return err + }) + return msg, err } func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err + if cs.localviewer == nil { + sb, err := cs.localbs.Get(c) + if err != nil { + log.Errorf("get message get failed: %s: %s", c, err) + return nil, err + } + return types.DecodeSignedMessage(sb.RawData()) } - return 
types.DecodeSignedMessage(sb.RawData()) + var msg *types.SignedMessage + err := cs.localviewer.View(c, func(b []byte) (err error) { + msg, err = types.DecodeSignedMessage(b) + return err + }) + return msg, err } func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { @@ -939,7 +1030,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return mmcids.bls, mmcids.secpk, nil } - cst := cbor.NewCborStore(cs.bs) + cst := cbor.NewCborStore(cs.localbs) var msgmeta types.MsgMeta if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 160527104..5f1f336f3 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -3,6 +3,7 @@ package store_test import ( "bytes" "context" + "io" "testing" datastore "github.com/ipfs/go-datastore" @@ -51,19 +52,26 @@ func BenchmarkGetRandomness(b *testing.B) { b.Fatal(err) } - bds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { b.Fatal(err) } + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + b.Logf("WARN: failed to close blockstore: %s", err) + } + } + }() + mds, err := lr.Datastore("/metadata") if err != nil { b.Fatal(err) } - bs := blockstore.NewBlockstore(bds) - - cs := store.NewChainStore(bs, mds, nil, nil) + cs := store.NewChainStore(bs, bs, mds, nil, nil) + defer cs.Close() //nolint:errcheck b.ResetTimer() @@ -97,7 +105,8 @@ func TestChainExportImport(t *testing.T) { } nbs := blockstore.NewTemporary() - cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil) + cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck root, err := cs.Import(buf) if err != nil { @@ -131,7 +140,9 @@ func TestChainExportImportFull(t *testing.T) { } nbs := blockstore.NewTemporary() - cs := 
store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil) + cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck + root, err := cs.Import(buf) if err != nil { t.Fatal(err) diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 1701866eb..dd8d1684b 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -95,7 +95,10 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha } took := build.Clock.Since(start) - log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + if took > 3*time.Second { + log.Warnw("Slow msg fetch", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + } if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 { log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner) } @@ -337,6 +340,13 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult { stats.Record(ctx, metrics.BlockPublished.M(1)) + if size := msg.Size(); size > 1<<20-1<<15 { + log.Errorf("ignoring oversize block (%dB)", size) + ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "oversize_block")) + stats.Record(ctx, metrics.BlockValidationFailure.M(1)) + return pubsub.ValidationIgnore + } + blk, what, err := bv.decodeAndCheckBlock(msg) if err != nil { log.Errorf("got invalid local block: %s", err) diff --git a/chain/sync.go b/chain/sync.go index 1410dd2a7..d5c6f73fd 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + 
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" blst "github.com/supranational/blst/bindings/go" @@ -278,7 +279,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { for _, blk := range fts.TipSet().Blocks() { miners = append(miners, blk.Miner.String()) } - log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) + log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) return false } @@ -563,15 +564,16 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { ) } - if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) { + hts := syncer.store.GetHeaviestTipSet() + + if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) { + return nil + } + if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) { return nil } - if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) { - return nil - } - - if err := syncer.collectChain(ctx, maybeHead); err != nil { + if err := syncer.collectChain(ctx, maybeHead, hts); err != nil { span.AddAttributes(trace.StringAttribute("col_error", err.Error())) span.SetStatus(trace.Status{ Code: 13, @@ -730,6 +732,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } + winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height()) + lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for 
block: %w", err) @@ -923,7 +927,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use }) wproofCheck := async.Err(func() error { - if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil { + if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil { return xerrors.Errorf("invalid election post: %w", err) } return nil @@ -975,7 +979,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use return nil } -func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { +func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { if build.InsecurePoStValidation { if len(h.WinPoStProof) == 0 { return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given") @@ -1007,7 +1011,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) } - sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand) + sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand) if err != nil { return xerrors.Errorf("getting winning post sector set: %w", err) } @@ -1071,7 +1075,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 1: syntactic validation, as defined in the spec minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total()); err != nil { + if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil { return err } @@ -1684,14 +1688,14 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, 
bst *exchange.Co // // 3. StageMessages: having acquired the headers and found a common tipset, // we then move forward, requesting the full blocks, including the messages. -func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { +func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet) error { ctx, span := trace.StartSpan(ctx, "collectChain") defer span.End() ss := extractSyncState(ctx) - ss.Init(syncer.store.GetHeaviestTipSet(), ts) + ss.Init(hts, ts) - headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet()) + headers, err := syncer.collectHeaders(ctx, ts, hts) if err != nil { ss.Error(err) return err diff --git a/chain/sync_manager.go b/chain/sync_manager.go index c25068f60..3b1b54e51 100644 --- a/chain/sync_manager.go +++ b/chain/sync_manager.go @@ -4,30 +4,43 @@ import ( "context" "os" "sort" + "strconv" "strings" "sync" + "time" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + peer "github.com/libp2p/go-libp2p-core/peer" ) -const BootstrapPeerThreshold = 2 +var ( + BootstrapPeerThreshold = build.BootstrapPeerThreshold -var coalesceForksParents = false + RecentSyncBufferSize = 10 + MaxSyncWorkers = 5 + SyncWorkerHistory = 3 + + InitialSyncTimeThreshold = 15 * time.Minute + + coalesceTipsets = false +) func init() { - if os.Getenv("LOTUS_SYNC_REL_PARENT") == "yes" { - coalesceForksParents = true + coalesceTipsets = os.Getenv("LOTUS_SYNC_FORMTS_PEND") == "yes" + + if bootstrapPeerThreshold := os.Getenv("LOTUS_SYNC_BOOTSTRAP_PEERS"); bootstrapPeerThreshold != "" { + threshold, err := strconv.Atoi(bootstrapPeerThreshold) + if err != nil { + log.Errorf("failed to parse 'LOTUS_SYNC_BOOTSTRAP_PEERS' env var: %s", err) + } else { + BootstrapPeerThreshold = threshold + } } } -const ( - BSStateInit = 0 - BSStateSelected = 1 - BSStateScheduled = 2 - BSStateComplete = 3 -) - type SyncFunc 
func(context.Context, *types.TipSet) error // SyncManager manages the chain synchronization process, both at bootstrap time @@ -52,108 +65,467 @@ type SyncManager interface { } type syncManager struct { - lk sync.Mutex - peerHeads map[peer.ID]*types.TipSet + ctx context.Context + cancel func() - bssLk sync.Mutex - bootstrapState int + workq chan peerHead + statusq chan workerStatus - bspThresh int + nextWorker uint64 + pend syncBucketSet + deferred syncBucketSet + heads map[peer.ID]*types.TipSet + recent *syncBuffer - incomingTipSets chan *types.TipSet - syncTargets chan *types.TipSet - syncResults chan *syncResult + initialSyncDone bool - syncStates []*SyncerState + mx sync.Mutex + state map[uint64]*workerState + + history []*workerState + historyI int - // Normally this handler is set to `(*Syncer).Sync()`. doSync func(context.Context, *types.TipSet) error - - stop chan struct{} - - // Sync Scheduler fields - activeSyncs map[types.TipSetKey]*types.TipSet - syncQueue syncBucketSet - activeSyncTips syncBucketSet - nextSyncTarget *syncTargetBucket - workerChan chan *types.TipSet } var _ SyncManager = (*syncManager)(nil) -type syncResult struct { - ts *types.TipSet - success bool +type peerHead struct { + p peer.ID + ts *types.TipSet } -const syncWorkerCount = 3 +type workerState struct { + id uint64 + ts *types.TipSet + ss *SyncerState + dt time.Duration +} +type workerStatus struct { + id uint64 + err error +} + +// sync manager interface func NewSyncManager(sync SyncFunc) SyncManager { - sm := &syncManager{ - bspThresh: 1, - peerHeads: make(map[peer.ID]*types.TipSet), - syncTargets: make(chan *types.TipSet), - syncResults: make(chan *syncResult), - syncStates: make([]*SyncerState, syncWorkerCount), - incomingTipSets: make(chan *types.TipSet), - activeSyncs: make(map[types.TipSetKey]*types.TipSet), - doSync: sync, - stop: make(chan struct{}), + ctx, cancel := context.WithCancel(context.Background()) + return &syncManager{ + ctx: ctx, + cancel: cancel, + + workq: 
make(chan peerHead), + statusq: make(chan workerStatus), + + heads: make(map[peer.ID]*types.TipSet), + state: make(map[uint64]*workerState), + recent: newSyncBuffer(RecentSyncBufferSize), + history: make([]*workerState, SyncWorkerHistory), + + doSync: sync, } - for i := range sm.syncStates { - sm.syncStates[i] = new(SyncerState) - } - return sm } func (sm *syncManager) Start() { - go sm.syncScheduler() - for i := 0; i < syncWorkerCount; i++ { - go sm.syncWorker(i) - } + go sm.scheduler() } func (sm *syncManager) Stop() { - close(sm.stop) + select { + case <-sm.ctx.Done(): + default: + sm.cancel() + } } func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) { - sm.lk.Lock() - defer sm.lk.Unlock() - sm.peerHeads[p] = ts - - if sm.getBootstrapState() == BSStateInit { - spc := sm.syncedPeerCount() - if spc >= sm.bspThresh { - // Its go time! - target, err := sm.selectSyncTarget() - if err != nil { - log.Error("failed to select sync target: ", err) - return - } - sm.setBootstrapState(BSStateSelected) - - sm.incomingTipSets <- target - } - log.Infof("sync bootstrap has %d peers", spc) - return + select { + case sm.workq <- peerHead{p: p, ts: ts}: + case <-sm.ctx.Done(): + case <-ctx.Done(): } - - sm.incomingTipSets <- ts } func (sm *syncManager) State() []SyncerStateSnapshot { - ret := make([]SyncerStateSnapshot, 0, len(sm.syncStates)) - for _, s := range sm.syncStates { - ret = append(ret, s.Snapshot()) + sm.mx.Lock() + workerStates := make([]*workerState, 0, len(sm.state)+len(sm.history)) + for _, ws := range sm.state { + workerStates = append(workerStates, ws) } - return ret + for _, ws := range sm.history { + if ws != nil { + workerStates = append(workerStates, ws) + } + } + sm.mx.Unlock() + + sort.Slice(workerStates, func(i, j int) bool { + return workerStates[i].id < workerStates[j].id + }) + + result := make([]SyncerStateSnapshot, 0, len(workerStates)) + for _, ws := range workerStates { + result = append(result, ws.ss.Snapshot()) + } 
+ + return result } +// sync manager internals +func (sm *syncManager) scheduler() { + ticker := time.NewTicker(time.Minute) + tickerC := ticker.C + for { + select { + case head := <-sm.workq: + sm.handlePeerHead(head) + case status := <-sm.statusq: + sm.handleWorkerStatus(status) + case <-tickerC: + if sm.initialSyncDone { + ticker.Stop() + tickerC = nil + sm.handleInitialSyncDone() + } + case <-sm.ctx.Done(): + return + } + } +} + +func (sm *syncManager) handlePeerHead(head peerHead) { + log.Debugf("new peer head: %s %s", head.p, head.ts) + + // have we started syncing yet? + if sm.nextWorker == 0 { + // track the peer head until we start syncing + sm.heads[head.p] = head.ts + + // not yet; do we have enough peers? + if len(sm.heads) < BootstrapPeerThreshold { + // not enough peers; track it and wait + return + } + + // we are ready to start syncing; select the sync target and spawn a worker + target, err := sm.selectInitialSyncTarget() + if err != nil { + log.Errorf("failed to select initial sync target: %s", err) + return + } + + log.Infof("selected initial sync target: %s", target) + sm.spawnWorker(target) + return + } + + // we have started syncing, add peer head to the queue if applicable and maybe spawn a worker + // if there is work to do (possibly in a fork) + target, work, err := sm.addSyncTarget(head.ts) + if err != nil { + log.Warnf("failed to add sync target: %s", err) + return + } + + if work { + log.Infof("selected sync target: %s", target) + sm.spawnWorker(target) + } +} + +func (sm *syncManager) handleWorkerStatus(status workerStatus) { + log.Debugf("worker %d done; status error: %s", status.id, status.err) + + sm.mx.Lock() + ws := sm.state[status.id] + delete(sm.state, status.id) + + // we track the last few workers for debug purposes + sm.history[sm.historyI] = ws + sm.historyI++ + sm.historyI %= len(sm.history) + sm.mx.Unlock() + + if status.err != nil { + // we failed to sync this target -- log it and try to work on an extended chain + // if 
there is nothing related to be worked on, we stop working on this chain. + log.Errorf("error during sync in %s: %s", ws.ts, status.err) + } else { + // add to the recently synced buffer + sm.recent.Push(ws.ts) + // if we are still in initial sync and this was fast enough, mark the end of the initial sync + if !sm.initialSyncDone && ws.dt < InitialSyncTimeThreshold { + sm.initialSyncDone = true + } + } + + // we are done with this target, select the next sync target and spawn a worker if there is work + // to do, because of an extension of this chain. + target, work, err := sm.selectSyncTarget(ws.ts) + if err != nil { + log.Warnf("failed to select sync target: %s", err) + return + } + + if work { + log.Infof("selected sync target: %s", target) + sm.spawnWorker(target) + } +} + +func (sm *syncManager) handleInitialSyncDone() { + // we have just finished the initial sync; spawn some additional workers in deferred syncs + // as needed (and up to MaxSyncWorkers) to ramp up chain sync + for len(sm.state) < MaxSyncWorkers { + target, work, err := sm.selectDeferredSyncTarget() + if err != nil { + log.Errorf("error selecting deferred sync target: %s", err) + return + } + + if !work { + return + } + + log.Infof("selected deferred sync target: %s", target) + sm.spawnWorker(target) + } +} + +func (sm *syncManager) spawnWorker(target *types.TipSet) { + id := sm.nextWorker + sm.nextWorker++ + ws := &workerState{ + id: id, + ts: target, + ss: new(SyncerState), + } + ws.ss.data.WorkerID = id + + sm.mx.Lock() + sm.state[id] = ws + sm.mx.Unlock() + + go sm.worker(ws) +} + +func (sm *syncManager) worker(ws *workerState) { + log.Infof("worker %d syncing in %s", ws.id, ws.ts) + + start := build.Clock.Now() + + ctx := context.WithValue(sm.ctx, syncStateKey{}, ws.ss) + err := sm.doSync(ctx, ws.ts) + + ws.dt = build.Clock.Since(start) + log.Infof("worker %d done; took %s", ws.id, ws.dt) + select { + case sm.statusq <- workerStatus{id: ws.id, err: err}: + case <-sm.ctx.Done(): + } +} + +// 
selects the initial sync target by examining known peer heads; only called once for the initial +// sync. +func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) { + var buckets syncBucketSet + + var peerHeads []*types.TipSet + for _, ts := range sm.heads { + peerHeads = append(peerHeads, ts) + } + // clear the map, we don't use it any longer + sm.heads = nil + + sort.Slice(peerHeads, func(i, j int) bool { + return peerHeads[i].Height() < peerHeads[j].Height() + }) + + for _, ts := range peerHeads { + buckets.Insert(ts) + } + + if len(buckets.buckets) > 1 { + log.Warn("caution, multiple distinct chains seen during head selections") + // TODO: we *could* refuse to sync here without user intervention. + // For now, just select the best cluster + } + + return buckets.Heaviest(), nil +} + +// adds a tipset to the potential sync targets; returns true if there is a a tipset to work on. +// this could be either a restart, eg because there is no currently scheduled sync work or a worker +// failed or a potential fork. +func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) { + // Note: we don't need the state lock here to access the active worker states, as the only + // competing threads that may access it do so through State() which is read only. 
+ + // if we have recently synced this or any heavier tipset we just ignore it; this can happen + // with an empty worker set after we just finished syncing to a target + if sm.recent.Synced(ts) { + return nil, false, nil + } + + // if the worker set is empty, we have finished syncing and were waiting for the next tipset + // in this case, we just return the tipset as work to be done + if len(sm.state) == 0 { + return ts, true, nil + } + + // check if it is related to any active sync; if so insert into the pending sync queue + for _, ws := range sm.state { + if ts.Equals(ws.ts) { + // ignore it, we are already syncing it + return nil, false, nil + } + + if ts.Parents() == ws.ts.Key() { + // schedule for syncing next; it's an extension of an active sync + sm.pend.Insert(ts) + return nil, false, nil + } + } + + // check to see if it is related to any pending sync; if so insert it into the pending sync queue + if sm.pend.RelatedToAny(ts) { + sm.pend.Insert(ts) + return nil, false, nil + } + + // it's not related to any active or pending sync; this could be a fork in which case we + // start a new worker to sync it, if it is *heavier* than any active or pending set; + // if it is not, we ignore it. 
+ for _, ws := range sm.state { + if isHeavier(ws.ts, ts) { + return nil, false, nil + } + } + + pendHeaviest := sm.pend.Heaviest() + if pendHeaviest != nil && isHeavier(pendHeaviest, ts) { + return nil, false, nil + } + + // if we have not finished the initial sync or have too many workers, add it to the deferred queue; + // it will be processed once a worker is freed from syncing a chain (or the initial sync finishes) + if !sm.initialSyncDone || len(sm.state) >= MaxSyncWorkers { + log.Debugf("deferring sync on %s", ts) + sm.deferred.Insert(ts) + return nil, false, nil + } + + // start a new worker, seems heavy enough and unrelated to active or pending syncs + return ts, true, nil +} + +// selects the next sync target after a worker sync has finished; returns true and a target +// TipSet if this chain should continue to sync because there is a heavier related tipset. +func (sm *syncManager) selectSyncTarget(done *types.TipSet) (*types.TipSet, bool, error) { + // we pop the related bucket and if there is any related tipset, we work on the heaviest one next + // if we are not already working on a heavier tipset + related := sm.pend.PopRelated(done) + if related == nil { + return sm.selectDeferredSyncTarget() + } + + heaviest := related.heaviestTipSet() + if isHeavier(done, heaviest) { + return sm.selectDeferredSyncTarget() + } + + for _, ws := range sm.state { + if isHeavier(ws.ts, heaviest) { + return sm.selectDeferredSyncTarget() + } + } + + if sm.recent.Synced(heaviest) { + return sm.selectDeferredSyncTarget() + } + + return heaviest, true, nil +} + +// selects a deferred sync target if there is any; these are sync targets that were not related to +// active syncs and were deferred because there were too many workers running +func (sm *syncManager) selectDeferredSyncTarget() (*types.TipSet, bool, error) { +deferredLoop: + for !sm.deferred.Empty() { + bucket := sm.deferred.Pop() + heaviest := bucket.heaviestTipSet() + + if sm.recent.Synced(heaviest) { + // we have 
synced it or something heavier recently, skip it + continue deferredLoop + } + + if sm.pend.RelatedToAny(heaviest) { + // this has converged to a pending sync, insert it to the pending queue + sm.pend.Insert(heaviest) + continue deferredLoop + } + + for _, ws := range sm.state { + if ws.ts.Equals(heaviest) || isHeavier(ws.ts, heaviest) { + // we have converged and are already syncing it or we are syncing on something heavier + // ignore it and pop the next deferred bucket + continue deferredLoop + } + + if heaviest.Parents() == ws.ts.Key() { + // we have converged and we are syncing its parent; insert it to the pending queue + sm.pend.Insert(heaviest) + continue deferredLoop + } + + // it's not related to any active or pending sync and this worker is free, so sync it! + return heaviest, true, nil + } + } + + return nil, false, nil +} + +func isHeavier(a, b *types.TipSet) bool { + return a.ParentWeight().GreaterThan(b.ParentWeight()) +} + +// sync buffer -- this is a circular buffer of recently synced tipsets +type syncBuffer struct { + buf []*types.TipSet + next int +} + +func newSyncBuffer(size int) *syncBuffer { + return &syncBuffer{buf: make([]*types.TipSet, size)} +} + +func (sb *syncBuffer) Push(ts *types.TipSet) { + sb.buf[sb.next] = ts + sb.next++ + sb.next %= len(sb.buf) +} + +func (sb *syncBuffer) Synced(ts *types.TipSet) bool { + for _, rts := range sb.buf { + if rts != nil && (rts.Equals(ts) || isHeavier(rts, ts)) { + return true + } + } + + return false +} + +// sync buckets and related utilities type syncBucketSet struct { buckets []*syncTargetBucket } +type syncTargetBucket struct { + tips []*types.TipSet +} + func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket { var stb syncTargetBucket for _, ts := range tipsets { @@ -250,10 +622,6 @@ func (sbs *syncBucketSet) Empty() bool { return len(sbs.buckets) == 0 } -type syncTargetBucket struct { - tips []*types.TipSet -} - func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { for 
_, t := range stb.tips { if ts.Equals(t) { @@ -265,19 +633,43 @@ func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { if ts.Parents() == t.Key() { return true } - if coalesceForksParents && ts.Parents() == t.Parents() { - return true - } } return false } func (stb *syncTargetBucket) add(ts *types.TipSet) { - - for _, t := range stb.tips { + for i, t := range stb.tips { if t.Equals(ts) { return } + if coalesceTipsets && t.Height() == ts.Height() && + types.CidArrsEqual(t.Blocks()[0].Parents, ts.Blocks()[0].Parents) { + miners := make(map[address.Address]struct{}) + newTs := []*types.BlockHeader{} + for _, b := range t.Blocks() { + _, have := miners[b.Miner] + if !have { + newTs = append(newTs, b) + miners[b.Miner] = struct{}{} + } + } + for _, b := range ts.Blocks() { + _, have := miners[b.Miner] + if !have { + newTs = append(newTs, b) + miners[b.Miner] = struct{}{} + } + } + + ts2, err := types.NewTipSet(newTs) + if err != nil { + log.Warnf("error while trying to recombine a tipset in a bucket: %+v", err) + continue + } + stb.tips[i] = ts2 + return + } + } stb.tips = append(stb.tips, ts) @@ -296,196 +688,3 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet { } return best } - -func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) { - var buckets syncBucketSet - - var peerHeads []*types.TipSet - for _, ts := range sm.peerHeads { - peerHeads = append(peerHeads, ts) - } - sort.Slice(peerHeads, func(i, j int) bool { - return peerHeads[i].Height() < peerHeads[j].Height() - }) - - for _, ts := range peerHeads { - buckets.Insert(ts) - } - - if len(buckets.buckets) > 1 { - log.Warn("caution, multiple distinct chains seen during head selections") - // TODO: we *could* refuse to sync here without user intervention. 
- // For now, just select the best cluster - } - - return buckets.Heaviest(), nil -} - -func (sm *syncManager) syncScheduler() { - for { - select { - case ts, ok := <-sm.incomingTipSets: - if !ok { - log.Info("shutting down sync scheduler") - return - } - - sm.scheduleIncoming(ts) - case res := <-sm.syncResults: - sm.scheduleProcessResult(res) - case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet(): - sm.scheduleWorkSent() - case <-sm.stop: - log.Info("sync scheduler shutting down") - return - } - } -} - -func (sm *syncManager) scheduleIncoming(ts *types.TipSet) { - log.Debug("scheduling incoming tipset sync: ", ts.Cids()) - if sm.getBootstrapState() == BSStateSelected { - sm.setBootstrapState(BSStateScheduled) - sm.syncTargets <- ts - return - } - - var relatedToActiveSync bool - for _, acts := range sm.activeSyncs { - if ts.Equals(acts) { - // ignore, we are already syncing it - return - } - - if ts.Parents() == acts.Key() { - // sync this next, after that sync process finishes - relatedToActiveSync = true - } - } - - if !relatedToActiveSync && sm.activeSyncTips.RelatedToAny(ts) { - relatedToActiveSync = true - } - - // if this is related to an active sync process, immediately bucket it - // we don't want to start a parallel sync process that duplicates work - if relatedToActiveSync { - sm.activeSyncTips.Insert(ts) - return - } - - if sm.getBootstrapState() == BSStateScheduled { - sm.syncQueue.Insert(ts) - return - } - - if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) { - sm.nextSyncTarget.add(ts) - } else { - sm.syncQueue.Insert(ts) - - if sm.nextSyncTarget == nil { - sm.nextSyncTarget = sm.syncQueue.Pop() - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *syncManager) scheduleProcessResult(res *syncResult) { - if res.success && sm.getBootstrapState() != BSStateComplete { - sm.setBootstrapState(BSStateComplete) - } - - delete(sm.activeSyncs, res.ts.Key()) - relbucket := sm.activeSyncTips.PopRelated(res.ts) - if relbucket != nil { - if 
res.success { - if sm.nextSyncTarget == nil { - sm.nextSyncTarget = relbucket - sm.workerChan = sm.syncTargets - } else { - for _, t := range relbucket.tips { - sm.syncQueue.Insert(t) - } - } - return - } - // TODO: this is the case where we try to sync a chain, and - // fail, and we have more blocks on top of that chain that - // have come in since. The question is, should we try to - // sync these? or just drop them? - log.Error("failed to sync chain but have new unconnected blocks from chain") - } - - if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() { - next := sm.syncQueue.Pop() - if next != nil { - sm.nextSyncTarget = next - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *syncManager) scheduleWorkSent() { - hts := sm.nextSyncTarget.heaviestTipSet() - sm.activeSyncs[hts.Key()] = hts - - if !sm.syncQueue.Empty() { - sm.nextSyncTarget = sm.syncQueue.Pop() - } else { - sm.nextSyncTarget = nil - sm.workerChan = nil - } -} - -func (sm *syncManager) syncWorker(id int) { - ss := sm.syncStates[id] - for { - select { - case ts, ok := <-sm.syncTargets: - if !ok { - log.Info("sync manager worker shutting down") - return - } - - ctx := context.WithValue(context.TODO(), syncStateKey{}, ss) - err := sm.doSync(ctx, ts) - if err != nil { - log.Errorf("sync error: %+v", err) - } - - sm.syncResults <- &syncResult{ - ts: ts, - success: err == nil, - } - } - } -} - -func (sm *syncManager) syncedPeerCount() int { - var count int - for _, ts := range sm.peerHeads { - if ts.Height() > 0 { - count++ - } - } - return count -} - -func (sm *syncManager) getBootstrapState() int { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState -} - -func (sm *syncManager) setBootstrapState(v int) { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - sm.bootstrapState = v -} - -func (sm *syncManager) IsBootstrapped() bool { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState == BSStateComplete -} diff --git a/chain/sync_manager_test.go 
b/chain/sync_manager_test.go index 709e03a41..61985b964 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -10,6 +10,10 @@ import ( "github.com/filecoin-project/lotus/chain/types/mock" ) +func init() { + BootstrapPeerThreshold = 1 +} + var genTs = mock.TipSet(mock.MkBlock(nil, 0, 0)) type syncOp struct { @@ -28,7 +32,12 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, <-ch return nil }).(*syncManager) - sm.bspThresh = thresh + + oldBootstrapPeerThreshold := BootstrapPeerThreshold + BootstrapPeerThreshold = thresh + defer func() { + BootstrapPeerThreshold = oldBootstrapPeerThreshold + }() sm.Start() defer sm.Stop() @@ -87,49 +96,59 @@ func TestSyncManagerEdgeCase(t *testing.T) { runSyncMgrTest(t, "edgeCase", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", a) - assertGetSyncOp(t, stc, a) sm.SetPeerHead(ctx, "peer1", b1) sm.SetPeerHead(ctx, "peer1", b2) - // b1 and b2 are being processed - b1op := <-stc - b2op := <-stc - if !b1op.ts.Equals(b1) { - b1op, b2op = b2op, b1op + assertGetSyncOp(t, stc, a) + + // b1 and b2 are in queue after a; the sync manager should pick the heaviest one which is b2 + bop := <-stc + if !bop.ts.Equals(b2) { + t.Fatalf("Expected tipset %s to sync, but got %s", b2, bop.ts) } - sm.SetPeerHead(ctx, "peer2", c2) // c2 is put into activeSyncTips at index 0 - sm.SetPeerHead(ctx, "peer2", c1) // c1 is put into activeSyncTips at index 1 - sm.SetPeerHead(ctx, "peer3", b2) // b2 is related to c2 and even though it is actively synced it is put into activeSyncTips index 0 - sm.SetPeerHead(ctx, "peer1", a) // a is related to b2 and is put into activeSyncTips index 0 + sm.SetPeerHead(ctx, "peer2", c2) + sm.SetPeerHead(ctx, "peer2", c1) + sm.SetPeerHead(ctx, "peer3", b2) + sm.SetPeerHead(ctx, "peer1", a) - b1op.done() // b1 completes first, is related to a, so it pops activeSyncTips index 0 - // even though correct one is index 1 + bop.done() - b2op.done() - 
// b2 completes and is not related to c1, so it leaves activeSyncTips as it is + // get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2) + bop = <-stc + if !bop.ts.Equals(c1) { + t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts) + } - waitUntilAllWorkersAreDone(stc) + sm.SetPeerHead(ctx, "peer4", d1) + sm.SetPeerHead(ctx, "peer5", e1) + bop.done() - if len(sm.activeSyncTips.buckets) != 0 { - t.Errorf("activeSyncTips expected empty but got: %s", sm.activeSyncTips.String()) + // get the last sync target; it should be e1 + var last *types.TipSet + for i := 0; i < 10; { + select { + case bop = <-stc: + bop.done() + if last == nil || bop.ts.Height() > last.Height() { + last = bop.ts + } + default: + i++ + time.Sleep(10 * time.Millisecond) + } + } + if !last.Equals(e1) { + t.Fatalf("Expected tipset %s to sync, but got %s", e1, last) + } + + if len(sm.state) != 0 { + t.Errorf("active syncs expected empty but got: %d", len(sm.state)) } }) } -func waitUntilAllWorkersAreDone(stc chan *syncOp) { - for i := 0; i < 10; { - select { - case so := <-stc: - so.done() - default: - i++ - time.Sleep(10 * time.Millisecond) - } - } -} - func TestSyncManager(t *testing.T) { ctx := context.Background() diff --git a/chain/syncstate.go b/chain/syncstate.go index 26f9f1c39..527d6be48 100644 --- a/chain/syncstate.go +++ b/chain/syncstate.go @@ -12,13 +12,14 @@ import ( ) type SyncerStateSnapshot struct { - Target *types.TipSet - Base *types.TipSet - Stage api.SyncStateStage - Height abi.ChainEpoch - Message string - Start time.Time - End time.Time + WorkerID uint64 + Target *types.TipSet + Base *types.TipSet + Stage api.SyncStateStage + Height abi.ChainEpoch + Message string + Start time.Time + End time.Time } type SyncerState struct { diff --git a/chain/types/fil.go b/chain/types/fil.go index 0ea77660c..3dabb5e77 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -61,6 +61,10 @@ func ParseFIL(s string) (FIL, error) { } } + 
if len(s) > 50 { + return FIL{}, fmt.Errorf("string length too large: %d", len(s)) + } + r, ok := new(big.Rat).SetString(s) if !ok { return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) diff --git a/chain/types/message.go b/chain/types/message.go index c53ecc7c1..4f6bb7822 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" @@ -144,7 +146,7 @@ func (m *Message) EqualCall(o *Message) bool { return (&m1).Equals(&m2) } -func (m *Message) ValidForBlockInclusion(minGas int64) error { +func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error { if m.Version != 0 { return xerrors.New("'Version' unsupported") } @@ -153,6 +155,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { return xerrors.New("'To' address cannot be empty") } + if m.To == build.ZeroAddress && version >= network.Version7 { + return xerrors.New("invalid 'To' address") + } + if m.From == address.Undef { return xerrors.New("'From' address cannot be empty") } diff --git a/chain/vm/gas.go b/chain/vm/gas.go index 95551f153..eef431aef 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,21 +3,17 @@ package vm import ( "fmt" - vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/ipfs/go-cid" ) -const ( - GasStorageMulti = 1000 
- GasComputeMulti = 1 -) - type GasCharge struct { Name string Extra interface{} @@ -30,7 +26,7 @@ type GasCharge struct { } func (g GasCharge) Total() int64 { - return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti + return g.ComputeGas + g.StorageGas } func (g GasCharge) WithVirtual(compute, storage int64) GasCharge { out := g @@ -85,6 +81,9 @@ type Pricelist interface { var prices = map[abi.ChainEpoch]Pricelist{ abi.ChainEpoch(0): &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1000, + onChainMessageComputeBase: 38863, onChainMessageStorageBase: 36, onChainMessageStoragePerByte: 1, @@ -129,6 +128,54 @@ var prices = map[abi.ChainEpoch]Pricelist{ verifyPostDiscount: true, verifyConsensusFault: 495422, }, + abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1300, + + onChainMessageComputeBase: 38863, + onChainMessageStorageBase: 36, + onChainMessageStoragePerByte: 1, + + onChainReturnValuePerByte: 1, + + sendBase: 29233, + sendTransferFunds: 27500, + sendTransferOnlyPremium: 159672, + sendInvokeMethod: -5377, + + ipldGetBase: 114617, + ipldPutBase: 353640, + ipldPutPerByte: 1, + + createActorCompute: 1108454, + createActorStorage: 36 + 40, + deleteActor: -(36 + 40), // -createActorStorage + + verifySignature: map[crypto.SigType]int64{ + crypto.SigTypeBLS: 16598605, + crypto.SigTypeSecp256k1: 1637292, + }, + + hashingBase: 31355, + computeUnsealedSectorCidBase: 98647, + verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ + abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { + flat: 117680921, + scale: 43780, + }, + }, + verifyPostDiscount: false, + verifyConsensusFault: 495422, + }, } // PricelistByEpoch finds the latest prices for the 
given epoch diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index e4028039b..7c864b7f9 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -18,6 +18,8 @@ type scalingCost struct { } type pricelistV0 struct { + computeGasMulti int64 + storageGasMulti int64 /////////////////////////////////////////////////////////////////////////// // System operations /////////////////////////////////////////////////////////////////////////// @@ -99,12 +101,12 @@ var _ Pricelist = (*pricelistV0)(nil) // OnChainMessage returns the gas used for storing a message of a given size in the chain. func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge { return newGasCharge("OnChainMessage", pl.onChainMessageComputeBase, - pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize)) + (pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))*pl.storageGasMulti) } // OnChainReturnValue returns the gas used for storing the response of a message in the chain. func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge { - return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte) + return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte*pl.storageGasMulti) } // OnMethodInvocation returns the gas used when invoking a method. @@ -131,23 +133,23 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M // OnIpldGet returns the gas used for storing an object func (pl *pricelistV0) OnIpldGet() GasCharge { - return newGasCharge("OnIpldGet", pl.ipldGetBase, 0) + return newGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0) } // OnIpldPut returns the gas used for storing an object func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { - return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte). 
- WithExtra(dataSize) + return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti). + WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300) } // OnCreateActor returns the gas used for creating an actor func (pl *pricelistV0) OnCreateActor() GasCharge { - return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage) + return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage*pl.storageGasMulti) } // OnDeleteActor returns the gas used for deleting an actor func (pl *pricelistV0) OnDeleteActor() GasCharge { - return newGasCharge("OnDeleteActor", 0, pl.deleteActor) + return newGasCharge("OnDeleteActor", 0, pl.deleteActor*pl.storageGasMulti) } // OnVerifySignature @@ -207,6 +209,7 @@ func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge } return newGasCharge("OnVerifyPost", gasUsed, 0). + WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0). WithExtra(map[string]interface{}{ "type": sectorSize, "size": len(info.ChallengedSectors), diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 661e31178..e22d69653 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/ipfs/go-cid" @@ -173,9 +175,14 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { paramT := meth.Type().In(1).Elem() param := reflect.New(paramT) + rt := in[0].Interface().(*Runtime) inBytes := in[1].Interface().([]byte) if err := DecodeParams(inBytes, param.Interface()); err != nil { - aerr := aerrors.Absorb(err, 1, "failed to decode parameters") + ec := exitcode.ErrSerialization + if rt.NetworkVersion() < network.Version7 { + ec = 1 + } + aerr := aerrors.Absorb(err, ec, "failed to decode parameters") return []reflect.Value{ reflect.ValueOf([]byte{}), // Below is a hack, fixed in Go 
1.13 @@ -183,7 +190,6 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { reflect.ValueOf(&aerr).Elem(), } } - rt := in[0].Interface().(*Runtime) rval, aerror := rt.shimCall(func() interface{} { ret := meth.Call([]reflect.Value{ reflect.ValueOf(rt), diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index bce385b02..6822e2371 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -1,10 +1,13 @@ package vm import ( + "context" "fmt" "io" "testing" + "github.com/filecoin-project/go-state-types/network" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/assert" cbg "github.com/whyrusleeping/cbor-gen" @@ -105,10 +108,27 @@ func TestInvokerBasic(t *testing.T) { } } - _, aerr := code[1](&Runtime{}, []byte{99}) - if aerrors.IsFatal(aerr) { - t.Fatal("err should not be fatal") + { + _, aerr := code[1](&Runtime{ + vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { + return network.Version0 + }}, + }, []byte{99}) + if aerrors.IsFatal(aerr) { + t.Fatal("err should not be fatal") + } + assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") } - assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") + { + _, aerr := code[1](&Runtime{ + vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { + return network.Version7 + }}, + }, []byte{99}) + if aerrors.IsFatal(aerr) { + t.Fatal("err should not be fatal") + } + assert.Equal(t, exitcode.ErrSerialization, aerrors.RetCode(aerr), "return code should be %s", 1) + } } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 6e36e8e87..a7e666d2f 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -244,20 +244,23 @@ func (rt *Runtime) NewActorAddress() address.Address { return addr } -func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { +func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) 
{ + if addr == address.Undef && rt.NetworkVersion() >= network.Version7 { + rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address") + } act, aerr := rt.vm.areg.Create(codeID, rt) if aerr != nil { rt.Abortf(aerr.RetCode(), aerr.Error()) } - _, err := rt.state.GetActor(address) + _, err := rt.state.GetActor(addr) if err == nil { rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") } rt.chargeGas(rt.Pricelist().OnCreateActor()) - err = rt.state.SetActor(address, act) + err = rt.state.SetActor(addr, act) if err != nil { panic(aerrors.Fatalf("creating actor entry: %v", err)) } @@ -266,7 +269,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { // DeleteActor deletes the executing actor from the state tree, transferring // any balance to beneficiary. -// Aborts if the beneficiary does not exist. +// Aborts if the beneficiary does not exist or is the calling actor. // May only be called by the actor itself. func (rt *Runtime) DeleteActor(beneficiary address.Address) { rt.chargeGas(rt.Pricelist().OnDeleteActor()) @@ -278,6 +281,19 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) { panic(aerrors.Fatalf("failed to get actor: %s", err)) } if !act.Balance.IsZero() { + // TODO: Should be safe to drop the version-check, + // since only the paych actor called this pre-version 7, but let's leave it for now + if rt.NetworkVersion() >= network.Version7 { + beneficiaryId, found := rt.ResolveAddress(beneficiary) + if !found { + rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist") + } + + if beneficiaryId == rt.Receiver() { + rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary") + } + } + // Transfer the executing actor's balance to the beneficiary if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) @@ -533,12 +549,19 @@ func (rt *Runtime) 
chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError ComputeGas: gas.ComputeGas, StorageGas: gas.StorageGas, - TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, VirtualComputeGas: gas.VirtualCompute, VirtualStorageGas: gas.VirtualStorage, Callers: callers[:cout], } + if gasTrace.VirtualStorageGas == 0 { + gasTrace.VirtualStorageGas = gasTrace.StorageGas + } + if gasTrace.VirtualComputeGas == 0 { + gasTrace.VirtualComputeGas = gasTrace.ComputeGas + } + gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas + rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) rt.lastGasChargeTime = now rt.lastGasCharge = &gasTrace @@ -546,9 +569,10 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError // overflow safe if rt.gasUsed > rt.gasAvailable-toUse { + gasUsed := rt.gasUsed rt.gasUsed = rt.gasAvailable - return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d", - rt.gasUsed, rt.gasAvailable) + return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d, use=%d", + gasUsed, rt.gasAvailable, toUse) } rt.gasUsed += toUse return nil diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index d2f1f77d3..bf879f8fc 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,6 +7,10 @@ import ( goruntime "runtime" "sync" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -40,7 +44,9 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { return func(ctx context.Context, rt *Runtime) runtime2.Syscalls { return &syscallShim{ - ctx: ctx, + ctx: ctx, + epoch: rt.CurrEpoch(), + networkVersion: rt.NetworkVersion(), actor: rt.Receiver(), cstate: rt.state, @@ -55,11 +61,13 @@ func Syscalls(verifier ffiwrapper.Verifier) 
SyscallBuilder { type syscallShim struct { ctx context.Context - lbState LookbackStateGetter - actor address.Address - cstate *state.StateTree - cst cbor.IpldStore - verifier ffiwrapper.Verifier + epoch abi.ChainEpoch + networkVersion network.Version + lbState LookbackStateGetter + actor address.Address + cstate *state.StateTree + cst cbor.IpldStore + verifier ffiwrapper.Verifier } func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { @@ -202,6 +210,10 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { } func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) { + if ss.networkVersion >= network.Version7 && height < ss.epoch-policy.ChainFinality { + return address.Undef, xerrors.Errorf("cannot get worker key (currEpoch %d, height %d)", ss.epoch, height) + } + lbState, err := ss.lbState(ss.ctx, height) if err != nil { return address.Undef, err diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 8b7f78074..0919b8e8a 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -9,6 +9,7 @@ import ( "time" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/metrics" block "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -16,6 +17,7 @@ import ( logging "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" @@ -70,6 +72,7 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad } var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) +var _ blockstore.Viewer = (*gasChargingBlocks)(nil) type gasChargingBlocks struct { chargeGas func(GasCharge) @@ -77,6 +80,24 @@ type gasChargingBlocks struct { under cbor.IpldBlockstore } +func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { + if v, ok := bs.under.(blockstore.Viewer); ok { + 
bs.chargeGas(bs.pricelist.OnIpldGet()) + return v.View(c, func(b []byte) error { + // we have successfully retrieved the value; charge for it, even if the user-provided function fails. + bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) + bs.chargeGas(gasOnActorExec) + return cb(b) + }) + } + // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. + blk, err := bs.Get(c) + if err == nil && blk != nil { + return cb(blk.RawData()) + } + return err +} + func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) blk, err := bs.under.Get(c) @@ -119,6 +140,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti } if parent != nil { + // TODO: The version check here should be unnecessary, but we can wait to take it out + if !parent.allowInternal && rt.NetworkVersion() >= network.Version7 { + rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled") + } rt.gasUsed = parent.gasUsed rt.origin = parent.origin rt.originNonce = parent.originNonce @@ -130,10 +155,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth") } - rt.cst = &cbor.BasicIpldStore{ - Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}, - Atlas: vm.cst.Atlas, - } + cbb := &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks} + cst := cbor.NewCborStore(cbb) + cst.Atlas = vm.cst.Atlas // associate the atlas. 
+ rt.cst = cst vmm := *msg resF, ok := rt.ResolveAddress(msg.From) @@ -583,6 +608,8 @@ func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorErr return act.Balance, nil } +type vmFlushKey struct{} + func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { _, span := trace.StartSpan(ctx, "vm.Flush") defer span.End() @@ -595,7 +622,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } - if err := Copy(ctx, from, to, root); err != nil { + if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil { return cid.Undef, xerrors.Errorf("copying tree: %w", err) } @@ -652,21 +679,48 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error { func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error { ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint defer span.End() + start := time.Now() var numBlocks int var totalCopySize int - var batch []block.Block + const batchSize = 128 + const bufCount = 3 + freeBufs := make(chan []block.Block, bufCount) + toFlush := make(chan []block.Block, bufCount) + for i := 0; i < bufCount; i++ { + freeBufs <- make([]block.Block, 0, batchSize) + } + + errFlushChan := make(chan error) + + go func() { + for b := range toFlush { + if err := to.PutMany(b); err != nil { + close(freeBufs) + errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) + return + } + freeBufs <- b[:0] + } + close(errFlushChan) + close(freeBufs) + }() + + var batch = <-freeBufs batchCp := func(blk block.Block) error { numBlocks++ totalCopySize += len(blk.RawData()) batch = append(batch, blk) - if len(batch) > 100 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) + + if len(batch) >= batchSize { + toFlush <- batch + var ok bool + batch, ok = <-freeBufs + if !ok { + return <-errFlushChan } - batch = batch[:0] } return nil } @@ -676,15 +730,22 @@ func Copy(ctx 
context.Context, from, to blockstore.Blockstore, root cid.Cid) err } if len(batch) > 0 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) - } + toFlush <- batch + } + close(toFlush) // close the toFlush triggering the loop to end + err := <-errFlushChan // get error out or get nil if it was closed + if err != nil { + return err } span.AddAttributes( trace.Int64Attribute("numBlocks", int64(numBlocks)), trace.Int64Attribute("copySize", int64(totalCopySize)), ) + if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok { + took := metrics.SinceInMilliseconds(start) + stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took)) + } return nil } diff --git a/chain/wallet/multi.go b/chain/wallet/multi.go index 532ad217b..5b603dbd5 100644 --- a/chain/wallet/multi.go +++ b/chain/wallet/multi.go @@ -90,7 +90,7 @@ func (m MultiWallet) WalletHas(ctx context.Context, address address.Address) (bo } func (m MultiWallet) WalletList(ctx context.Context) ([]address.Address, error) { - var out []address.Address + out := make([]address.Address, 0) seen := map[address.Address]struct{}{} ws := nonNil(m.Remote, m.Ledger, m.Local) diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go index 33fa3135e..46ff92861 100644 --- a/chain/wallet/wallet.go +++ b/chain/wallet/wallet.go @@ -305,6 +305,18 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er delete(w.keys, addr) + def, err := w.GetDefault() + if err != nil { + return xerrors.Errorf("getting default address: %w", err) + } + + if def == addr { + err = w.SetDefault(address.Undef) + if err != nil { + return xerrors.Errorf("unsetting default address: %w", err) + } + } + return nil } diff --git a/cli/chain.go b/cli/chain.go index e2d0ebb4a..0fa999d17 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -3,6 +3,7 @@ package cli import ( "bytes" "context" + "encoding/base64" "encoding/hex" "encoding/json" "fmt" @@ -1246,14 
+1247,19 @@ var chainDecodeCmd = &cli.Command{ } var chainDecodeParamsCmd = &cli.Command{ - Name: "params", - Usage: "Decode message params", + Name: "params", + Usage: "Decode message params", + ArgsUsage: "[toAddr method params]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "tipset", }, + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, }, - ArgsUsage: "[toAddr method hexParams]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1276,11 +1282,21 @@ var chainDecodeParamsCmd = &cli.Command{ return xerrors.Errorf("parsing method id: %w", err) } - params, err := hex.DecodeString(cctx.Args().Get(2)) - if err != nil { - return xerrors.Errorf("parsing hex params: %w", err) + var params []byte + switch cctx.String("encoding") { + case "base64": + params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + case "hex": + params, err = hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) } - ts, err := LoadTipSet(ctx, cctx, api) if err != nil { return err diff --git a/cli/client.go b/cli/client.go index 07e3cb2c8..20a7231da 100644 --- a/cli/client.go +++ b/cli/client.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "math" "math/rand" "os" "path/filepath" @@ -81,6 +82,7 @@ var clientCmd = &cli.Command{ WithCategory("storage", clientListDeals), WithCategory("storage", clientGetDealCmd), WithCategory("storage", clientListAsksCmd), + WithCategory("storage", clientDealStatsCmd), WithCategory("data", clientImportCmd), WithCategory("data", clientDropCmd), WithCategory("data", clientLocalCmd), @@ -1112,6 +1114,80 @@ var clientRetrieveCmd = &cli.Command{ }, } +var clientDealStatsCmd = &cli.Command{ + Name: "deal-stats", + Usage: "Print 
statistics about local storage deals", + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "newer-than", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + localDeals, err := api.ClientListDeals(ctx) + if err != nil { + return err + } + + var totalSize uint64 + byState := map[storagemarket.StorageDealStatus][]uint64{} + for _, deal := range localDeals { + if cctx.IsSet("newer-than") { + if time.Now().Sub(deal.CreationTime) > cctx.Duration("newer-than") { + continue + } + } + + totalSize += deal.Size + byState[deal.State] = append(byState[deal.State], deal.Size) + } + + fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize))) + + type stateStat struct { + state storagemarket.StorageDealStatus + count int + bytes uint64 + } + + stateStats := make([]stateStat, 0, len(byState)) + for state, deals := range byState { + if state == storagemarket.StorageDealActive { + state = math.MaxUint64 // for sort + } + + st := stateStat{ + state: state, + count: len(deals), + } + for _, b := range deals { + st.bytes += b + } + + stateStats = append(stateStats, st) + } + + sort.Slice(stateStats, func(i, j int) bool { + return int64(stateStats[i].state) < int64(stateStats[j].state) + }) + + for _, st := range stateStats { + if st.state == math.MaxUint64 { + st.state = storagemarket.StorageDealActive + } + fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes))) + } + + return nil + }, +} + var clientListAsksCmd = &cli.Command{ Name: "list-asks", Usage: "List asks for top miners", diff --git a/cli/state.go b/cli/state.go index 427746155..31537cc89 100644 --- a/cli/state.go +++ b/cli/state.go @@ -119,7 +119,7 @@ var stateMinerInfo = &cli.Command{ } fmt.Printf("PeerID:\t%s\n", mi.PeerId) - fmt.Printf("Multiaddrs: \t") + fmt.Printf("Multiaddrs:\t") for _, addr := range 
mi.Multiaddrs { a, err := multiaddr.NewMultiaddrBytes(addr) if err != nil { @@ -127,6 +127,7 @@ var stateMinerInfo = &cli.Command{ } fmt.Printf("%s ", a) } + fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) pow, err := api.StateMinerPower(ctx, addr, ts.Key()) diff --git a/cli/sync.go b/cli/sync.go index c3f25eb1d..ff7d4bd65 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -45,8 +45,8 @@ var syncStatusCmd = &cli.Command{ } fmt.Println("sync status:") - for i, ss := range state.ActiveSyncs { - fmt.Printf("worker %d:\n", i) + for _, ss := range state.ActiveSyncs { + fmt.Printf("worker %d:\n", ss.WorkerID) var base, target []cid.Cid var heightDiff int64 var theight abi.ChainEpoch @@ -263,12 +263,17 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { return err } + if len(state.ActiveSyncs) == 0 { + time.Sleep(time.Second) + continue + } + head, err := napi.ChainHead(ctx) if err != nil { return err } - working := 0 + working := -1 for i, ss := range state.ActiveSyncs { switch ss.Stage { case api.StageSyncComplete: @@ -279,7 +284,12 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { } } + if working == -1 { + working = len(state.ActiveSyncs) - 1 + } + ss := state.ActiveSyncs[working] + workerID := ss.WorkerID var baseHeight abi.ChainEpoch var target []cid.Cid @@ -302,7 +312,7 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { fmt.Print("\r\x1b[2K\x1b[A") } - fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", working, baseHeight, theight, heightDiff) + fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff) fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height) lastLines = 2 diff --git a/cli/test/net.go b/cli/test/net.go index 836b81a8f..8e45e3aed 100644 --- a/cli/test/net.go +++ 
b/cli/test/net.go @@ -32,6 +32,7 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura // Start mining blocks bm := test.NewBlockMiner(ctx, t, miner, blocktime) bm.MineBlocks() + t.Cleanup(bm.Stop) // Get the full node's wallet address fullAddr, err := full.WalletDefaultAddress(ctx) @@ -67,6 +68,7 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur // Start mining blocks bm := test.NewBlockMiner(ctx, t, miner, blocktime) bm.MineBlocks() + t.Cleanup(bm.Stop) // Send some funds to register the second node fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index a4200c447..9fa6731aa 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -16,21 +16,30 @@ import ( "sort" "time" + ocprom "contrib.go.opencensus.io/exporter/prometheus" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" + "github.com/ipfs/go-cid" + metricsi "github.com/ipfs/go-metrics-interface" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/blockstore" + badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - metricsprometheus "github.com/ipfs/go-metrics-prometheus" - "github.com/ipld/go-car" - "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/go-state-types/abi" + metricsprometheus "github.com/ipfs/go-metrics-prometheus" + "github.com/ipld/go-car" + 
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" bdg "github.com/dgraph-io/badger/v2" @@ -56,9 +65,25 @@ var importBenchCmd = &cli.Command{ importAnalyzeCmd, }, Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "start-tipset", + Usage: "start validation at the given tipset key; in format cid1,cid2,cid3...", + }, + &cli.StringFlag{ + Name: "end-tipset", + Usage: "halt validation at the given tipset key; in format cid1,cid2,cid3...", + }, + &cli.StringFlag{ + Name: "genesis-tipset", + Usage: "genesis tipset key; in format cid1,cid2,cid3...", + }, &cli.Int64Flag{ - Name: "height", - Usage: "halt validation after given height", + Name: "start-height", + Usage: "start validation at given height; beware that chain traversal by height is very slow", + }, + &cli.Int64Flag{ + Name: "end-height", + Usage: "halt validation after given height; beware that chain traversal by height is very slow", }, &cli.IntFlag{ Name: "batch-seal-verify-threads", @@ -86,32 +111,52 @@ var importBenchCmd = &cli.Command{ Name: "global-profile", Value: true, }, - &cli.Int64Flag{ - Name: "start-at", - }, &cli.BoolFlag{ Name: "only-import", }, &cli.BoolFlag{ Name: "use-pebble", }, + &cli.BoolFlag{ + Name: "use-native-badger", + }, + &cli.StringFlag{ + Name: "car", + Usage: "path to CAR file; required for import; on validation, either " + + "a CAR path or the --head flag are required", + }, + &cli.StringFlag{ + Name: "head", + Usage: "tipset key of the head, useful when benchmarking validation " + + "on an existing chain store, where a CAR is not available; " + + "if both --car and --head are provided, --head takes precedence " + + "over the CAR root; the format is cid1,cid2,cid3...", + }, }, Action: func(cctx *cli.Context) error { metricsprometheus.Inject() //nolint:errcheck vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") - if !cctx.Args().Present() { - fmt.Println("must pass car file of chain to benchmark importing") - return nil - } - - cfi, err := 
os.Open(cctx.Args().First()) - if err != nil { - return err - } - defer cfi.Close() //nolint:errcheck // read only file go func() { - http.Handle("/debug/metrics/prometheus", promhttp.Handler()) + // Prometheus globals are exposed as interfaces, but the prometheus + // OpenCensus exporter expects a concrete *Registry. The concrete type of + // the globals are actually *Registry, so we downcast them, staying + // defensive in case things change under the hood. + registry, ok := prometheus.DefaultRegisterer.(*prometheus.Registry) + if !ok { + log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", prometheus.DefaultRegisterer) + return + } + exporter, err := ocprom.NewExporter(ocprom.Options{ + Registry: registry, + Namespace: "lotus", + }) + if err != nil { + log.Fatalf("could not create the prometheus stats exporter: %v", err) + } + + http.Handle("/debug/metrics", exporter) + http.ListenAndServe("localhost:6060", nil) //nolint:errcheck }() @@ -126,17 +171,17 @@ var importBenchCmd = &cli.Command{ tdir = tmp } - bdgOpt := badger.DefaultOptions - bdgOpt.GcInterval = 0 - bdgOpt.Options = bdg.DefaultOptions("") - bdgOpt.Options.SyncWrites = false - bdgOpt.Options.Truncate = true - bdgOpt.Options.DetectConflicts = false + var ( + ds datastore.Batching + bs blockstore.Blockstore + err error + ) - var bds datastore.Batching - if cctx.Bool("use-pebble") { + switch { + case cctx.Bool("use-pebble"): + log.Info("using pebble") cache := 512 - bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ + ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ // Pebble has a single combined cache area and the write // buffers are taken from this too. Assign all available // memory allowance for cache. 
@@ -155,30 +200,53 @@ var importBenchCmd = &cli.Command{ }, Logger: log, }) - } else { - bds, err = badger.NewDatastore(tdir, &bdgOpt) + + case cctx.Bool("use-native-badger"): + log.Info("using native badger") + var opts badgerbs.Options + if opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, tdir, false); err != nil { + return err + } + opts.SyncWrites = false + bs, err = badgerbs.Open(opts) + + default: // legacy badger via datastore. + log.Info("using legacy badger") + bdgOpt := badger.DefaultOptions + bdgOpt.GcInterval = 0 + bdgOpt.Options = bdg.DefaultOptions("") + bdgOpt.Options.SyncWrites = false + bdgOpt.Options.Truncate = true + bdgOpt.Options.DetectConflicts = false + + ds, err = badger.NewDatastore(tdir, &bdgOpt) } + if err != nil { return err } - defer bds.Close() //nolint:errcheck - bds = measure.New("dsbench", bds) + if ds != nil { + ds = measure.New("dsbench", ds) + defer ds.Close() //nolint:errcheck + bs = blockstore.NewBlockstore(ds) + } - bs := blockstore.NewBlockstore(bds) + if c, ok := bs.(io.Closer); ok { + defer c.Close() //nolint:errcheck + } + + ctx := metricsi.CtxScope(context.Background(), "lotus") cacheOpts := blockstore.DefaultCacheOpts() cacheOpts.HasBloomFilterSize = 0 - - cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts) + bs, err = blockstore.CachedBlockstore(ctx, bs, cacheOpts) if err != nil { return err } - bs = cbs - ds := datastore.NewMapDatastore() var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier if cctx.IsSet("syscall-cache") { - scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt) + scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &badger.DefaultOptions) if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } @@ -193,11 +261,223 @@ var importBenchCmd = &cli.Command{ return nil } - cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier), nil) + metadataDs := datastore.NewMapDatastore() + cs := store.NewChainStore(bs, bs, 
metadataDs, vm.Syscalls(verifier), nil) + defer cs.Close() //nolint:errcheck + stm := stmgr.NewStateManager(cs) + startTime := time.Now() + + // register a gauge that reports how long since the measurable + // operation began. + promauto.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "lotus_bench_time_taken_secs", + }, func() float64 { + return time.Since(startTime).Seconds() + }) + + defer func() { + end := time.Now().Format(time.RFC3339) + + resp, err := http.Get("http://localhost:6060/debug/metrics") + if err != nil { + log.Warnf("failed to scrape prometheus: %s", err) + } + + metricsfi, err := os.Create("bench.metrics") + if err != nil { + log.Warnf("failed to write prometheus data: %s", err) + } + + _, _ = io.Copy(metricsfi, resp.Body) //nolint:errcheck + _ = metricsfi.Close() //nolint:errcheck + + writeProfile := func(name string) { + if file, err := os.Create(fmt.Sprintf("%s.%s.%s.pprof", name, startTime.Format(time.RFC3339), end)); err == nil { + if err := pprof.Lookup(name).WriteTo(file, 0); err != nil { + log.Warnf("failed to write %s pprof: %s", name, err) + } + _ = file.Close() + } else { + log.Warnf("failed to create %s pprof file: %s", name, err) + } + } + + writeProfile("heap") + writeProfile("allocs") + }() + + var carFile *os.File + + // open the CAR file if one is provided. + if path := cctx.String("car"); path != "" { + var err error + if carFile, err = os.Open(path); err != nil { + return xerrors.Errorf("failed to open provided CAR file: %w", err) + } + } + + var head *types.TipSet + + // --- IMPORT --- + if !cctx.Bool("no-import") { + if cctx.Bool("global-profile") { + prof, err := os.Create("bench.import.pprof") + if err != nil { + return err + } + defer prof.Close() //nolint:errcheck + + if err := pprof.StartCPUProfile(prof); err != nil { + return err + } + } + + // import is NOT suppressed; do it. + if carFile == nil { // a CAR is compulsory for the import.
+ return fmt.Errorf("no CAR file provided for import") + } + + head, err = cs.Import(carFile) + if err != nil { + return err + } + + pprof.StopCPUProfile() + } + + if cctx.Bool("only-import") { + return nil + } + + // --- VALIDATION --- + // + // we are now preparing for the validation benchmark. + // a HEAD needs to be set; --head takes precedence over the root + // of the CAR, if both are provided. + if h := cctx.String("head"); h != "" { + cids, err := lcli.ParseTipSetString(h) + if err != nil { + return xerrors.Errorf("failed to parse head tipset key: %w", err) + } + + head, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + if err != nil { + return err + } + } else if carFile != nil && head == nil { + cr, err := car.NewCarReader(carFile) + if err != nil { + return err + } + head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) + if err != nil { + return err + } + } else if h == "" && carFile == nil { + return xerrors.Errorf("neither --car nor --head flags supplied") + } + + log.Infof("chain head is tipset: %s", head.Key()) + + var genesis *types.TipSet + log.Infof("getting genesis block") + if tsk := cctx.String("genesis-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse genesis tipset key: %w", err) + } + genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else { + log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset") + // fallback to the slow path of walking the chain. + genesis, err = cs.GetTipsetByHeight(context.TODO(), 0, head, true) + } + + if err != nil { + return err + } + + if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil { + return err + } + + // Resolve the end tipset, falling back to head if not provided. 
+ end := head + if tsk := cctx.String("end-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse end tipset key: %w", err) + } + end, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else if h := cctx.Int64("end-height"); h != 0 { + log.Infof("getting end tipset at height %d...", h) + end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) + } + + if err != nil { + return err + } + + // Resolve the start tipset, if provided; otherwise, fallback to + // height 1 for a start point. + var ( + startEpoch = abi.ChainEpoch(1) + start *types.TipSet + ) + + if tsk := cctx.String("start-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse start tipset key: %w", err) + } + start, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else if h := cctx.Int64("start-height"); h != 0 { + log.Infof("getting start tipset at height %d...", h) + // lookback from the end tipset (which falls back to head if not supplied).
+ start, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), end, true) + } + + if err != nil { + return err + } + + if start != nil { + startEpoch = start.Height() + if err := cs.ForceHeadSilent(context.Background(), start); err != nil { + // if err := cs.SetHead(start); err != nil { + return err + } + } + + inverseChain := append(make([]*types.TipSet, 0, end.Height()), end) + for ts := end; ts.Height() > startEpoch; { + if h := ts.Height(); h%100 == 0 { + log.Infof("walking back the chain; loaded tipset at height %d...", h) + } + next, err := cs.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + + inverseChain = append(inverseChain, next) + ts = next + } + + var enc *json.Encoder + if cctx.Bool("export-traces") { + ibj, err := os.Create("bench.json") + if err != nil { + return err + } + defer ibj.Close() //nolint:errcheck + + enc = json.NewEncoder(ibj) + } + if cctx.Bool("global-profile") { - prof, err := os.Create("import-bench.prof") + prof, err := os.Create("bench.validation.pprof") if err != nil { return err } @@ -208,84 +488,8 @@ var importBenchCmd = &cli.Command{ } } - var head *types.TipSet - if !cctx.Bool("no-import") { - head, err = cs.Import(cfi) - if err != nil { - return err - } - } else { - cr, err := car.NewCarReader(cfi) - if err != nil { - return err - } - head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) - if err != nil { - return err - } - } - - if cctx.Bool("only-import") { - return nil - } - - gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true) - if err != nil { - return err - } - - err = cs.SetGenesis(gb.Blocks()[0]) - if err != nil { - return err - } - - startEpoch := abi.ChainEpoch(1) - if cctx.IsSet("start-at") { - startEpoch = abi.ChainEpoch(cctx.Int64("start-at")) - start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true) - if err != nil { - return err - } - - err = cs.SetHead(start) - if err != nil { - return err - } - } - - if h := 
cctx.Int64("height"); h != 0 { - tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) - if err != nil { - return err - } - head = tsh - } - - ts := head - tschain := []*types.TipSet{ts} - for ts.Height() > startEpoch { - next, err := cs.LoadTipSet(ts.Parents()) - if err != nil { - return err - } - - tschain = append(tschain, next) - ts = next - } - - var enc *json.Encoder - if cctx.Bool("export-traces") { - ibj, err := os.Create("import-bench.json") - if err != nil { - return err - } - defer ibj.Close() //nolint:errcheck - - enc = json.NewEncoder(ibj) - } - - for i := len(tschain) - 1; i >= 1; i-- { - cur := tschain[i] + for i := len(inverseChain) - 1; i >= 1; i-- { + cur := inverseChain[i] start := time.Now() log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) st, trace, err := stm.ExecutionTrace(context.TODO(), cur) @@ -304,7 +508,7 @@ var importBenchCmd = &cli.Command{ return xerrors.Errorf("failed to write out tipsetexec: %w", err) } } - if tschain[i-1].ParentState() != st { + if inverseChain[i-1].ParentState() != st { stripCallers(tse.Trace) lastTrace := tse.Trace d, err := json.MarshalIndent(lastTrace, "", " ") @@ -320,23 +524,7 @@ var importBenchCmd = &cli.Command{ pprof.StopCPUProfile() - if true { - resp, err := http.Get("http://localhost:6060/debug/metrics/prometheus") - if err != nil { - return err - } - - metricsfi, err := os.Create("import-bench.metrics") - if err != nil { - return err - } - - io.Copy(metricsfi, resp.Body) //nolint:errcheck - metricsfi.Close() //nolint:errcheck - } - return nil - }, } diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 0ca532d74..d99b0d29b 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -31,6 +31,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/genesis" @@ -240,15 +241,6 @@ var sealBenchCmd = &cli.Command{ } sectorSize := abi.SectorSize(sectorSizeInt) - spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize) - if err != nil { - return err - } - - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } - // Only fetch parameters if actually needed skipc2 := c.Bool("skip-commit2") if !skipc2 { @@ -261,7 +253,7 @@ var sealBenchCmd = &cli.Command{ Root: sbdir, } - sb, err := ffiwrapper.New(sbfs, cfg) + sb, err := ffiwrapper.New(sbfs) if err != nil { return err } @@ -329,7 +321,7 @@ var sealBenchCmd = &cli.Command{ if !skipc2 { log.Info("generating winning post candidates") - wipt, err := spt.RegisteredWinningPoStProof() + wipt, err := spt(sectorSize).RegisteredWinningPoStProof() if err != nil { return err } @@ -509,11 +501,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par if numSectors%par.PreCommit1 != 0 { return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors") } - - for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ { - sid := abi.SectorID{ - Miner: mid, - Number: i, + for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ { + sid := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: i, + }, + ProofType: spt(sectorSize), } start := time.Now() @@ -540,9 +534,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par start := worker * sectorsPerWorker end := start + sectorsPerWorker for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ { - sid := abi.SectorID{ - Miner: mid, - Number: i, + ix := int(i - 1) + sid := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: i, + }, + ProofType: spt(sectorSize), } start := time.Now() @@ -570,8 +568,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par precommit2 := time.Now() <-preCommit2Sema - 
sealedSectors[i] = saproof2.SectorInfo{ - SealProof: sb.SealProofType(), + sealedSectors[ix] = saproof2.SectorInfo{ + SealProof: sid.ProofType, SectorNumber: i, SealedCID: cids.Sealed, } @@ -625,7 +623,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par svi := saproof2.SealVerifyInfo{ SectorID: sid, SealedCID: cids.Sealed, - SealProof: sb.SealProofType(), + SealProof: sid.ProofType, Proof: proof, DealIDs: nil, Randomness: ticket, @@ -742,24 +740,25 @@ var proveCmd = &cli.Command{ return err } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize)) + sb, err := ffiwrapper.New(nil) if err != nil { return err } - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } + start := time.Now() - sb, err := ffiwrapper.New(nil, cfg) - if err != nil { - return err + ref := storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(c2in.SectorNum), + }, + ProofType: spt(abi.SectorSize(c2in.SectorSize)), } fmt.Printf("----\nstart proof computation\n") start := time.Now() - proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out) + proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out) if err != nil { return err } @@ -783,3 +782,12 @@ func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string { bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds())) return types.SizeStr(types.BigInt{Int: bps}) + "/s" } + +func spt(ssize abi.SectorSize) abi.RegisteredSealProof { + spt, err := miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion) + if err != nil { + panic(err) + } + + return spt +} diff --git a/cmd/lotus-gateway/endtoend_test.go b/cmd/lotus-gateway/endtoend_test.go index 1e1e5e229..f0b950f5e 100644 --- a/cmd/lotus-gateway/endtoend_test.go +++ b/cmd/lotus-gateway/endtoend_test.go @@ -278,6 +278,7 @@ func startNodes( // Start mining blocks bm := 
test.NewBlockMiner(ctx, t, miner, blocktime) bm.MineBlocks() + t.Cleanup(bm.Stop) return &testNodes{lite: lite, full: full, miner: miner, closer: closer} } diff --git a/cmd/lotus-seal-worker/info.go b/cmd/lotus-seal-worker/info.go index 3388d8a59..65f26dc86 100644 --- a/cmd/lotus-seal-worker/info.go +++ b/cmd/lotus-seal-worker/info.go @@ -42,7 +42,7 @@ var infoCmd = &cli.Command{ if err != nil { return xerrors.Errorf("checking worker status: %w", err) } - fmt.Printf("Enabled: %t", enabled) + fmt.Printf("Enabled: %t\n", enabled) info, err := api.Info(ctx) if err != nil { @@ -64,7 +64,6 @@ var infoCmd = &cli.Command{ fmt.Printf("%s:\n", path.ID) fmt.Printf("\tWeight: %d; Use: ", path.Weight) if path.CanSeal || path.CanStore { - fmt.Printf("Weight: %d; Use: ", path.Weight) if path.CanSeal { fmt.Print("Seal ") } diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index 520964867..1f20bffbd 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -32,7 +32,6 @@ import ( "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/lib/lotuslog" @@ -356,11 +355,6 @@ var runCmd = &cli.Command{ } // Setup remote sector store - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - return xerrors.Errorf("getting proof type: %w", err) - } - sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner) if err != nil { return xerrors.Errorf("could not get api info: %w", err) @@ -374,7 +368,6 @@ var runCmd = &cli.Command{ workerApi := &worker{ LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{ - SealProof: spt, TaskTypes: taskTypes, NoSwap: cctx.Bool("no-swap"), }, 
remote, localStore, nodeApi, nodeApi, wsts), diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go index d365f6493..7822900e4 100644 --- a/cmd/lotus-seed/main.go +++ b/cmd/lotus-seed/main.go @@ -8,8 +8,6 @@ import ( "os" "github.com/docker/go-units" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - logging "github.com/ipfs/go-log/v2" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -19,6 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/genesis" @@ -128,12 +127,12 @@ var preSealCmd = &cli.Command{ } sectorSize := abi.SectorSize(sectorSizeInt) - rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize) + spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion) if err != nil { return err } - gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors")) + gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors")) if err != nil { return err } diff --git a/cmd/lotus-seed/seed/seed.go b/cmd/lotus-seed/seed/seed.go index ab8e5a52a..b52490928 100644 --- a/cmd/lotus-seed/seed/seed.go +++ b/cmd/lotus-seed/seed/seed.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" + "github.com/filecoin-project/specs-storage/storage" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" @@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt 
abi.RegisteredSealProof, offset abi.Sect return nil, nil, err } - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } - if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec return nil, nil, err } @@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect Root: sbroot, } - sb, err := ffiwrapper.New(sbfs, cfg) + sb, err := ffiwrapper.New(sbfs) if err != nil { return nil, nil, err } @@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect var sealedSectors []*genesis.PreSeal for i := 0; i < sectors; i++ { sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next} + ref := storage.SectorRef{ID: sid, ProofType: spt} next++ var preseal *genesis.PreSeal if !fakeSectors { - preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage) + preseal, err = presealSector(sb, sbfs, ref, ssize, preimage) if err != nil { return nil, nil, err } } else { - preseal, err = presealSectorFake(sbfs, sid, spt, ssize) + preseal, err = presealSectorFake(sbfs, ref, ssize) if err != nil { return nil, nil, err } @@ -148,7 +146,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect return miner, &minerAddr.KeyInfo, nil } -func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) { +func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) { pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader) if err != nil { return nil, err @@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector return &genesis.PreSeal{ CommR: cids.Sealed, CommD: cids.Unsealed, - SectorID: sid.Number, - ProofType: spt, + SectorID: sid.ID.Number, + ProofType: sid.ProofType, }, nil } -func 
presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) { +func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) { paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire unsealed sector: %w", err) @@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe return nil, xerrors.Errorf("mkdir cache: %w", err) } - commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed) + commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed) if err != nil { return nil, xerrors.Errorf("fauxrep: %w", err) } @@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe return &genesis.PreSeal{ CommR: commr, CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()), - SectorID: sid.Number, - ProofType: spt, + SectorID: sid.ID.Number, + ProofType: sid.ProofType, }, nil } -func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error { - paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) +func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error { + paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index b12c069f5..da1263408 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "io" "strconv" "github.com/filecoin-project/lotus/chain/gen/genesis" @@ -10,6 +11,7 @@ import ( _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/docker/go-units" + 
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/actors/builtin/power" @@ -24,6 +26,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/state" @@ -33,7 +36,6 @@ import ( "github.com/filecoin-project/lotus/chain/vm" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -168,19 +170,26 @@ var chainBalanceStateCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + mds, err := lkrepo.Datastore("/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) - - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) @@ -382,19 +391,26 @@ var chainPledgeCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return xerrors.Errorf("failed to open blockstore: %w", err) } + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + 
log.Warnf("failed to close blockstore: %s", err) + } + } + }() + mds, err := lkrepo.Datastore("/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) - - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) diff --git a/cmd/lotus-shed/cid.go b/cmd/lotus-shed/cid.go new file mode 100644 index 000000000..7839ec601 --- /dev/null +++ b/cmd/lotus-shed/cid.go @@ -0,0 +1,48 @@ +package main + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var cidCmd = &cli.Command{ + Name: "cid", + Subcommands: cli.Commands{ + cidIdCmd, + }, +} + +var cidIdCmd = &cli.Command{ + Name: "id", + Usage: "create identity CID from hex or base64 data", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must specify data") + } + + dec, err := hex.DecodeString(cctx.Args().First()) + if err != nil { + dec, err = base64.StdEncoding.DecodeString(cctx.Args().First()) + if err != nil { + return err + } + + } + + builder := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + + c, err := builder.Sum(dec) + if err != nil { + return err + } + + fmt.Println(c) + return nil + }, +} diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index 83422e77b..8cdc1630c 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -8,10 +8,10 @@ import ( "os" "strings" + "github.com/dgraph-io/badger/v2" "github.com/docker/go-units" "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" - badgerds "github.com/ipfs/go-ds-badger2" logging "github.com/ipfs/go-log" "github.com/mitchellh/go-homedir" "github.com/polydawn/refmt/cbor" @@ -312,30 +312,41 @@ var datastoreRewriteCmd = 
&cli.Command{ return xerrors.Errorf("cannot get toPath: %w", err) } - opts := repo.ChainBadgerOptions() - opts.Options = opts.Options.WithSyncWrites(false) - to, err := badgerds.NewDatastore(toPath, &opts) + var ( + from *badger.DB + to *badger.DB + ) + + // open the destination (to) store. + opts, err := repo.BadgerBlockstoreOptions(repo.BlockstoreChain, toPath, false) if err != nil { - return xerrors.Errorf("opennig 'to' datastore: %w", err) + return xerrors.Errorf("failed to get badger options: %w", err) + } + opts.SyncWrites = false + if to, err = badger.Open(opts.Options); err != nil { + return xerrors.Errorf("opening 'to' badger store: %w", err) } - opts.Options = opts.Options.WithReadOnly(false) - from, err := badgerds.NewDatastore(fromPath, &opts) + // open the source (from) store. + opts, err = repo.BadgerBlockstoreOptions(repo.BlockstoreChain, fromPath, true) if err != nil { - return xerrors.Errorf("opennig 'from' datastore: %w", err) + return xerrors.Errorf("failed to get badger options: %w", err) + } + if from, err = badger.Open(opts.Options); err != nil { + return xerrors.Errorf("opening 'from' datastore: %w", err) } pr, pw := io.Pipe() errCh := make(chan error) go func() { bw := bufio.NewWriterSize(pw, 64<<20) - _, err := from.DB.Backup(bw, 0) + _, err := from.Backup(bw, 0) _ = bw.Flush() _ = pw.CloseWithError(err) errCh <- err }() go func() { - err := to.DB.Load(pr, 256) + err := to.Load(pr, 256) errCh <- err }() diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index 3be49f0e0..9b0703445 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -3,16 +3,17 @@ package main import ( "context" "fmt" + "io" "os" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/blockstore" 
"github.com/filecoin-project/lotus/node/repo" ) @@ -71,19 +72,27 @@ var exportChainCmd = &cli.Command{ defer fi.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + mds, err := lr.Datastore("/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) + cs := store.NewChainStore(bs, bs, mds, nil, nil) + defer cs.Close() //nolint:errcheck - cs := store.NewChainStore(bs, mds, nil, nil) if err := cs.Load(); err != nil { return err } diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 4b197c58f..20561eb5a 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -52,7 +52,8 @@ var genesisVerifyCmd = &cli.Command{ } bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cs := store.NewChainStore(bs, datastore.NewMapDatastore(), nil, nil) + cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck cf := cctx.Args().Get(0) f, err := os.Open(cf) diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 9cbff953b..9fa853728 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -12,7 +12,6 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -45,12 +44,18 @@ var importCarCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { return err } - bs := blockstore.NewBlockstore(ds) + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + 
log.Warnf("failed to close blockstore: %s", err) + } + } + }() cr, err := car.NewCarReader(f) if err != nil { @@ -65,7 +70,7 @@ var importCarCmd = &cli.Command{ return err } fmt.Println() - return ds.Close() + return nil default: if err := f.Close(); err != nil { return err @@ -108,12 +113,18 @@ var importObjectCmd = &cli.Command{ } defer lr.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - bs := blockstore.NewBlockstore(ds) + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() c, err := cid.Decode(cctx.Args().Get(0)) if err != nil { diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index eef357596..5da5c0188 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -47,6 +47,8 @@ func main() { sectorsCmd, msgCmd, electionCmd, + rpcCmd, + cidCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 6cf4f8c6f..8728163c9 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -3,20 +3,19 @@ package main import ( "context" "fmt" + "io" "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/bbloom" + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" + badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" "github.com/filecoin-project/lotus/node/repo" - "github.com/ipfs/bbloom" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - dshelp "github.com/ipfs/go-ipfs-ds-help" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" ) type 
cidSet interface { @@ -132,12 +131,25 @@ var stateTreePruneCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - defer ds.Close() //nolint:errcheck + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + // After migrating to native blockstores, this has been made + // database-specific. + badgbs, ok := bs.(*badgerbs.Blockstore) + if !ok { + return fmt.Errorf("only badger blockstores are supported") + } mds, err := lkrepo.Datastore("/metadata") if err != nil { @@ -145,24 +157,21 @@ var stateTreePruneCmd = &cli.Command{ } defer mds.Close() //nolint:errcheck + const DiscardRatio = 0.2 if cctx.Bool("only-ds-gc") { - gcds, ok := ds.(datastore.GCDatastore) - if ok { - fmt.Println("running datastore gc....") - for i := 0; i < cctx.Int("gc-count"); i++ { - if err := gcds.CollectGarbage(); err != nil { - return xerrors.Errorf("datastore GC failed: %w", err) - } + fmt.Println("running datastore gc....") + for i := 0; i < cctx.Int("gc-count"); i++ { + if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + return xerrors.Errorf("datastore GC failed: %w", err) } - fmt.Println("gc complete!") - return nil } - return fmt.Errorf("datastore doesnt support gc") + fmt.Println("gc complete!") + return nil } - bs := blockstore.NewBlockstore(ds) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) if err := cs.Load(); err != nil { return fmt.Errorf("loading chainstore: %w", err) } @@ -199,63 +208,30 @@ var stateTreePruneCmd = &cli.Command{ return nil } - var b datastore.Batch - var batchCount int + b := badgbs.DB.NewWriteBatch() + defer 
b.Cancel() + markForRemoval := func(c cid.Cid) error { - if b == nil { - nb, err := ds.Batch() - if err != nil { - return fmt.Errorf("opening batch: %w", err) - } - - b = nb - } - batchCount++ - - if err := b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil { - return err - } - - if batchCount > 100 { - if err := b.Commit(); err != nil { - return xerrors.Errorf("failed to commit batch deletes: %w", err) - } - b = nil - batchCount = 0 - } - return nil + return b.Delete(badgbs.StorageKey(nil, c)) } - res, err := ds.Query(query.Query{KeysOnly: true}) + keys, err := bs.AllKeysChan(context.Background()) if err != nil { - return xerrors.Errorf("failed to query datastore: %w", err) + return xerrors.Errorf("failed to query blockstore: %w", err) } dupTo := cctx.Int("delete-up-to") var deleteCount int var goodHits int - for { - v, ok := res.NextSync() - if !ok { - break - } - - bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):])) - if err != nil { - return xerrors.Errorf("failed to parse key: %w", err) - } - - if goodSet.HasRaw(bk) { + for k := range keys { + if goodSet.HasRaw(k.Bytes()) { goodHits++ continue } - nc := cid.NewCidV1(cid.Raw, bk) - - deleteCount++ - if err := markForRemoval(nc); err != nil { - return fmt.Errorf("failed to remove cid %s: %w", nc, err) + if err := markForRemoval(k); err != nil { + return fmt.Errorf("failed to remove cid %s: %w", k, err) } if deleteCount%20 == 0 { @@ -267,22 +243,17 @@ var stateTreePruneCmd = &cli.Command{ } } - if b != nil { - if err := b.Commit(); err != nil { - return xerrors.Errorf("failed to commit final batch delete: %w", err) - } + if err := b.Flush(); err != nil { + return xerrors.Errorf("failed to flush final batch delete: %w", err) } - gcds, ok := ds.(datastore.GCDatastore) - if ok { - fmt.Println("running datastore gc....") - for i := 0; i < cctx.Int("gc-count"); i++ { - if err := gcds.CollectGarbage(); err != nil { - return xerrors.Errorf("datastore GC failed: %w", err) - } + 
fmt.Println("running datastore gc....") + for i := 0; i < cctx.Int("gc-count"); i++ { + if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + return xerrors.Errorf("datastore GC failed: %w", err) } - fmt.Println("gc complete!") } + fmt.Println("gc complete!") return nil }, diff --git a/cmd/lotus-shed/rpc.go b/cmd/lotus-shed/rpc.go new file mode 100644 index 000000000..924bd197c --- /dev/null +++ b/cmd/lotus-shed/rpc.go @@ -0,0 +1,136 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "text/scanner" + + "github.com/chzyer/readline" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/repo" +) + +var rpcCmd = &cli.Command{ + Name: "rpc", + Usage: "Interactive JsonPRC shell", + // TODO: flag for miner/worker + Action: func(cctx *cli.Context) error { + addr, headers, err := lcli.GetRawAPI(cctx, repo.FullNode) + if err != nil { + return err + } + + u, err := url.Parse(addr) + if err != nil { + return xerrors.Errorf("parsing api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + + ctx := lcli.ReqContext(cctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + afmt := lcli.NewAppFmt(cctx.App) + + cs := readline.NewCancelableStdin(afmt.Stdin) + go func() { + <-ctx.Done() + cs.Close() // nolint:errcheck + }() + + cctx.App.Metadata["repoType"] = repo.FullNode + if err := lcli.VersionCmd.Action(cctx); err != nil { + return err + } + fmt.Println("Usage: > Method [Param1, Param2, ...]") + + rl, err := readline.NewEx(&readline.Config{ + Stdin: cs, + HistoryFile: "/tmp/lotusrpc.tmp", + Prompt: "> ", + EOFPrompt: "exit", + HistorySearchFold: true, + + // TODO: Some basic auto completion + }) + if err != nil { + return err + } + + for { + line, err := rl.Readline() + if err == readline.ErrInterrupt { + 
if len(line) == 0 { + break + } else { + continue + } + } else if err == io.EOF { + break + } + + var s scanner.Scanner + s.Init(strings.NewReader(line)) + s.Scan() + method := s.TokenText() + + s.Scan() + params := line[s.Position.Offset:] + + jreq, err := json.Marshal(struct { + Jsonrpc string `json:"jsonrpc"` + ID int `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + }{ + Jsonrpc: "2.0", + Method: "Filecoin." + method, + Params: json.RawMessage(params), + ID: 0, + }) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", addr, bytes.NewReader(jreq)) + if err != nil { + return err + } + req.Header = headers + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + fmt.Println(string(rb)) + + if err := resp.Body.Close(); err != nil { + return err + } + } + + return nil + }, +} diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 2e78469fa..64f3faf79 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -25,6 +25,7 @@ var sectorsCmd = &cli.Command{ Flags: []cli.Flag{}, Subcommands: []*cli.Command{ terminateSectorCmd, + terminateSectorPenaltyEstimationCmd, }, } @@ -131,3 +132,101 @@ var terminateSectorCmd = &cli.Command{ return nil }, } + +func findPenaltyInInternalExecutions(prefix string, trace []types.ExecutionTrace) { + for _, im := range trace { + if im.Msg.To.String() == "f099" /*Burn actor*/ { + fmt.Printf("Estimated termination penalty: %s attoFIL\n", im.Msg.Value) + return + } + findPenaltyInInternalExecutions(prefix+"\t", im.Subcalls) + } +} + +var terminateSectorPenaltyEstimationCmd = &cli.Command{ + Name: "termination-estimate", + Usage: "Estimate the termination penalty", + ArgsUsage: "[sectorNum1 sectorNum2 ...]", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 1 { + return fmt.Errorf("at least one sector must be specified") + } + + nodeApi, 
closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := api.ActorAddress(ctx) + if err != nil { + return err + } + + mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + terminationDeclarationParams := []miner2.TerminationDeclaration{} + + for _, sn := range cctx.Args().Slice() { + sectorNum, err := strconv.ParseUint(sn, 10, 64) + if err != nil { + return fmt.Errorf("could not parse sector number: %w", err) + } + + sectorbit := bitfield.New() + sectorbit.Set(sectorNum) + + loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK) + if err != nil { + return fmt.Errorf("get state sector partition %s", err) + } + + para := miner2.TerminationDeclaration{ + Deadline: loca.Deadline, + Partition: loca.Partition, + Sectors: sectorbit, + } + + terminationDeclarationParams = append(terminationDeclarationParams, para) + } + + terminateSectorParams := &miner2.TerminateSectorsParams{ + Terminations: terminationDeclarationParams, + } + + sp, err := actors.SerializeParams(terminateSectorParams) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + msg := &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.TerminateSectors, + + Value: big.Zero(), + Params: sp, + } + + //TODO: 4667 add an option to give a more precise estimation with pending termination penalty excluded + + invocResult, err := nodeApi.StateCall(ctx, msg, types.TipSetKey{}) + if err != nil { + return xerrors.Errorf("fail to state call: %w", err) + } + + findPenaltyInInternalExecutions("\t", invocResult.ExecutionTrace.Subcalls) + return nil + }, +} diff --git a/cmd/lotus-stats/chain.dashboard.json b/cmd/lotus-stats/chain.dashboard.json index 5ff7654d0..8083c96b1 100644 --- 
a/cmd/lotus-stats/chain.dashboard.json +++ b/cmd/lotus-stats/chain.dashboard.json @@ -1,20 +1,11 @@ { - "__inputs": [ - { - "name": "DS_INFLUXDB", - "label": "InfluxDB", - "description": "", - "type": "datasource", - "pluginId": "influxdb", - "pluginName": "InfluxDB" - } - ], + "__inputs": [], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.5.0-pre" + "version": "7.3.0" }, { "type": "panel", @@ -36,8 +27,8 @@ }, { "type": "panel", - "id": "table", - "name": "Table", + "id": "table-old", + "name": "Table (old)", "version": "" } ], @@ -58,6 +49,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, + "iteration": 1604018016916, "links": [], "panels": [ { @@ -65,8 +57,15 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", "decimals": 2, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 3, "fillGradient": 0, "gridPos": { @@ -75,6 +74,7 @@ "x": 0, "y": 0 }, + "hiddenSeries": false, "hideTimeOverride": false, "id": 38, "interval": "", @@ -93,15 +93,25 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "all", + "bars": false, + "color": "rgb(99, 99, 99)", + "fill": 1, + "lines": true, + "stack": false + } + ], "spaceLength": 10, "stack": true, "steppedLine": false, @@ -128,10 +138,11 @@ "type": "fill" } ], + "hide": false, "measurement": "chain.election", "orderByTime": "ASC", "policy": "default", - "query": "SELECT count(\"value\") FROM \"chain.election\" WHERE $timeFilter -10m GROUP BY time($__interval), \"miner\" fill(null)", + "query": "SELECT sum(\"value\") FROM \"chain.election\" WHERE $timeFilter GROUP BY time($blockInterval), \"miner\" 
fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -156,13 +167,52 @@ ] ], "tags": [] + }, + { + "alias": "all", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": false, + "orderByTime": "ASC", + "policy": "defult", + "query": "SELECT TRIPLE_EXPONENTIAL_MOVING_AVERAGE(sum(\"value\"), 40) FROM \"chain.election\" WHERE $timeFilter -$blockInterval*40 AND time < now() - $blockInterval*3 GROUP BY time($blockInterval) fill(0)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Blocks Won", + "title": "Blocks and Win Counts", "tooltip": { "shared": true, "sort": 2, @@ -207,7 +257,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -216,6 +273,7 @@ "x": 0, "y": 9 }, + "hiddenSeries": false, "id": 22, "interval": "", "legend": { @@ -232,9 +290,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -318,7 +377,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "s", "gauge": { "maxValue": 100, @@ -350,7 +415,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -422,7 +486,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" 
], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "bytes", "gauge": { "maxValue": 100, @@ -454,7 +524,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -493,7 +562,7 @@ ], "orderByTime": "ASC", "policy": "default", - "query": "SELECT sum(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time(45s)", + "query": "SELECT sum(\"value\") FROM \"chain.power\" WHERE $timeFilter GROUP BY time(25s)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -538,7 +607,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -570,7 +645,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -596,7 +670,7 @@ "groupBy": [ { "params": [ - "$interval" + "$blockInterval" ], "type": "time" } @@ -616,7 +690,7 @@ }, { "params": [], - "type": "sum" + "type": "count" } ] ], @@ -648,7 +722,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -680,7 +760,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -746,7 +825,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "s", "gauge": { "maxValue": 100, @@ -778,7 +863,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", 
"nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -848,7 +932,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -880,7 +970,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -906,7 +995,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -917,7 +1006,7 @@ "type": "fill" } ], - "measurement": "chain.message_gasprice", + "measurement": "chain.message_gaspremium", "orderByTime": "ASC", "policy": "default", "refId": "A", @@ -932,7 +1021,7 @@ }, { "params": [], - "type": "mean" + "type": "median" } ] ], @@ -942,7 +1031,7 @@ "thresholds": "", "timeFrom": null, "timeShift": null, - "title": "Avg Gas Price", + "title": "Avg Gas Premium", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -963,7 +1052,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "decbytes", "gauge": { "maxValue": 100, @@ -995,7 +1090,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -1021,7 +1115,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1078,7 +1172,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "bytes", "gauge": { "maxValue": 100, @@ -1110,7 +1210,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": 
"", @@ -1136,7 +1235,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1193,7 +1292,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -1225,7 +1330,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "pluginVersion": "6.4.2", "postfix": "", "postfixFontSize": "50%", @@ -1252,7 +1356,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1311,8 +1415,14 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", "decimals": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "dateTimeFromNow", "gauge": { "maxValue": 100, @@ -1344,7 +1454,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -1413,7 +1522,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -1422,6 +1538,7 @@ "x": 4, "y": 16 }, + "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": true, @@ -1441,9 +1558,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -1569,7 +1687,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -1601,7 +1725,6 @@ "maxDataPoints": 100, 
"nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "FIL", "postfixFontSize": "50%", "prefix": "", @@ -1660,7 +1783,13 @@ }, { "columns": [], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fontSize": "100%", "gridPos": { "h": 21, @@ -1669,7 +1798,6 @@ "y": 19 }, "id": 28, - "options": {}, "pageSize": null, "showHeader": true, "sort": { @@ -1679,12 +1807,14 @@ "styles": [ { "alias": "Time", + "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "Time", "type": "hidden" }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1701,6 +1831,7 @@ }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1741,7 +1872,7 @@ "timeShift": null, "title": "Top Power Table", "transform": "table", - "type": "table" + "type": "table-old" }, { "aliasColors": {}, @@ -1749,7 +1880,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 5, "fillGradient": 0, "gridPos": { @@ -1758,8 +1896,9 @@ "x": 4, "y": 19 }, + "hiddenSeries": false, "id": 40, - "interval": "", + "interval": "300s", "legend": { "alignAsTable": true, "avg": false, @@ -1778,11 +1917,12 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": true, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -1817,7 +1957,7 @@ "measurement": "chain.miner_power", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(previous)", + "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" 
WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -1885,7 +2025,13 @@ }, { "columns": [], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fontSize": "100%", "gridPos": { "h": 21, @@ -1894,7 +2040,6 @@ "y": 19 }, "id": 18, - "options": {}, "pageSize": null, "showHeader": true, "sort": { @@ -1904,6 +2049,7 @@ "styles": [ { "alias": "Height", + "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "link": false, "mappingType": 1, @@ -1914,6 +2060,7 @@ }, { "alias": "Tipset", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1930,6 +2077,7 @@ }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1973,74 +2121,77 @@ "timeShift": null, "title": "Chain Table", "transform": "timeseries_to_columns", - "type": "table" + "type": "table-old" }, { "aliasColors": {}, "bars": false, - "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 6, + "h": 7, "w": 12, "x": 4, "y": 27 }, - "id": 24, + "hiddenSeries": false, + "id": 50, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, - "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*/", - "color": "rgb(31, 120, 193)" - } - ], + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { + "alias": "Total GasLimit", 
"groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, { "params": [ - "previous" + "null" ], "type": "fill" } ], - "measurement": "chain.pledge_collateral", + "measurement": "chain.gas_limit_total", "orderByTime": "ASC", "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, "refId": "A", "resultFormat": "time_series", "select": [ @@ -2053,18 +2204,107 @@ }, { "params": [], - "type": "mean" + "type": "max" + } + ] + ], + "tags": [] + }, + { + "alias": "Total GasUsed", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.gas_used_total", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_used_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "max" + } + ] + ], + "tags": [] + }, + { + "alias": "Total Unique GasLimit", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.gas_limit_uniq_total", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "max" } ] ], "tags": [] } ], - "thresholds": [], + "thresholds": [ + { + "colorMode": "custom", + "fill": false, + "fillColor": "rgba(50, 116, 217, 0.2)", + "line": true, + "lineColor": "rgba(31, 96, 196, 0.6)", + "op": "gt", + "value": 25000000000, + 
"yaxis": "left" + } + ], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Pledge Collateral", + "title": "Network Gas", "tooltip": { "shared": true, "sort": 0, @@ -2081,7 +2321,7 @@ "yaxes": [ { "format": "short", - "label": "FIL", + "label": null, "logBase": 1, "max": null, "min": null, @@ -2107,15 +2347,23 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 6, "w": 12, "x": 4, - "y": 33 + "y": 34 }, + "hiddenSeries": false, "id": 44, "legend": { "avg": false, @@ -2131,9 +2379,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2146,7 +2395,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -2228,7 +2477,14 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2237,6 +2493,7 @@ "x": 0, "y": 40 }, + "hiddenSeries": false, "id": 34, "legend": { "alignAsTable": true, @@ -2251,11 +2508,12 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2269,7 +2527,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -2360,7 +2618,14 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + 
"links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2369,6 +2634,7 @@ "x": 12, "y": 40 }, + "hiddenSeries": false, "id": 36, "legend": { "alignAsTable": true, @@ -2387,11 +2653,12 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2437,7 +2704,7 @@ "measurement": "chain.message_count", "orderByTime": "ASC", "policy": "default", - "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($__interval), \"method\", \"exitcode\", \"actor\" fill(null)", + "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($blockInterval), \"method\", \"exitcode\", \"actor\" fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2498,14 +2765,701 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + 
"measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*1000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Cost of simple transfer [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "sci", + "label": "", + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 49 + }, + "hiddenSeries": false, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], 
+ "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\") FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Base Fee[FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "decimals": null, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 51, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Precommit Transfer Fee", + 
"groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*24000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "Commit Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*56000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Message Gas fees [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "none", + "label": null, + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, 
+ "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 57 + }, + "hiddenSeries": false, + "id": 52, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "10 PIB PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*940000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "750TiB miner PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*580000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "10TiB miner PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + 
"orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*380000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Message Gas fees [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "none", + "label": null, + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], - "refresh": "45s", - "schemaVersion": 20, + "refresh": false, + "schemaVersion": 26, "style": "dark", "tags": [], "templating": { - "list": [] + "list": [ + { + "current": { + "selected": false, + "text": "filecoin-ntwk-testnet", + "value": "filecoin-ntwk-testnet" + }, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Network", + "multi": false, + "name": "network", + "options": [], + "query": "influxdb", + "queryValue": "", + "refresh": 1, + "regex": "/^filecoin-ntwk-/", + "skipUrlSync": false, + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "30s", + "value": "30s" + }, + "error": null, + "hide": 2, + "label": null, + "name": "blockInterval", + "options": [ + { + "selected": true, + "text": "30s", + "value": "30s" + } + ], + "query": "30s", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] }, "time": { "from": "now-30m", @@ -2515,6 +3469,7 @@ 
"refresh_intervals": [ "5s", "10s", + "25s", "30s", "45s", "1m", @@ -2527,7 +3482,7 @@ ] }, "timezone": "", - "title": "Chain", + "title": "Filecoin Chain Stats", "uid": "z6FtI92Zz", - "version": 9 + "version": 4 } diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml index 03d573b94..b08a2157e 100644 --- a/cmd/lotus-stats/docker-compose.yml +++ b/cmd/lotus-stats/docker-compose.yml @@ -4,10 +4,10 @@ services: influxdb: image: influxdb:latest container_name: influxdb + ports: + - "18086:8086" environment: - INFLUXDB_DB=lotus - ports: - - "8086:8086" volumes: - influxdb:/var/lib/influxdb @@ -15,7 +15,7 @@ services: image: grafana/grafana:latest container_name: grafana ports: - - "3000:3000" + - "13000:3000" links: - influxdb volumes: diff --git a/cmd/lotus-stats/env.stats b/cmd/lotus-stats/env.stats index a76e7554a..ad5ec1619 100644 --- a/cmd/lotus-stats/env.stats +++ b/cmd/lotus-stats/env.stats @@ -1,3 +1,3 @@ -export INFLUX_ADDR="http://localhost:8086" +export INFLUX_ADDR="http://localhost:18086" export INFLUX_USER="" export INFLUX_PASS="" diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index 3ca139b7d..b4c13ea8c 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -2,71 +2,160 @@ package main import ( "context" - "flag" "os" + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/tools/stats" + logging "github.com/ipfs/go-log/v2" + "github.com/urfave/cli/v2" ) var log = logging.Logger("stats") -const ( - influxAddrEnvVar = "INFLUX_ADDR" - influxUserEnvVar = "INFLUX_USER" - influxPassEnvVar = "INFLUX_PASS" -) - func main() { - var repo string = "~/.lotus" - var database string = "lotus" - var reset bool = false - var nosync bool = false - var height int64 = 0 - var headlag int = 3 - - flag.StringVar(&repo, "repo", repo, "lotus repo path") - flag.StringVar(&database, "database", database, "influx database") - flag.Int64Var(&height, 
"height", height, "block height to start syncing from (0 will resume)") - flag.IntVar(&headlag, "head-lag", headlag, "number of head events to hold to protect against small reorgs") - flag.BoolVar(&reset, "reset", reset, "truncate database before starting stats gathering") - flag.BoolVar(&nosync, "nosync", nosync, "skip waiting for sync") - - flag.Parse() - - ctx := context.Background() - - influx, err := stats.InfluxClient(os.Getenv(influxAddrEnvVar), os.Getenv(influxUserEnvVar), os.Getenv(influxPassEnvVar)) - if err != nil { - log.Fatal(err) + local := []*cli.Command{ + runCmd, + versionCmd, } - if reset { - if err := stats.ResetDatabase(influx, database); err != nil { - log.Fatal(err) - } + app := &cli.App{ + Name: "lotus-stats", + Usage: "Collect basic information about a filecoin network using lotus", + Version: build.UserVersion(), + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "lotus-path", + EnvVars: []string{"LOTUS_PATH"}, + Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME + }, + &cli.StringFlag{ + Name: "log-level", + EnvVars: []string{"LOTUS_STATS_LOG_LEVEL"}, + Value: "info", + }, + }, + Before: func(cctx *cli.Context) error { + return logging.SetLogLevel("stats", cctx.String("log-level")) + }, + Commands: local, } - if !reset && height == 0 { - h, err := stats.GetLastRecordedHeight(influx, database) - if err != nil { - log.Info(err) - } - - height = h + if err := app.Run(os.Args); err != nil { + log.Errorw("exit in error", "err", err) + os.Exit(1) + return } - - api, closer, err := stats.GetFullNodeAPI(ctx, repo) - if err != nil { - log.Fatal(err) - } - defer closer() - - if !nosync { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) - } - } - - stats.Collect(ctx, api, influx, database, height, headlag) +} + +var versionCmd = &cli.Command{ + Name: "version", + Usage: "Print version", + Action: func(cctx *cli.Context) error { + cli.VersionPrinter(cctx) + return nil + }, +} + +var runCmd = &cli.Command{ + Name: "run", + 
Usage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "influx-database", + EnvVars: []string{"LOTUS_STATS_INFLUX_DATABASE"}, + Usage: "influx database", + Value: "", + }, + &cli.StringFlag{ + Name: "influx-hostname", + EnvVars: []string{"LOTUS_STATS_INFLUX_HOSTNAME"}, + Value: "http://localhost:8086", + Usage: "influx hostname", + }, + &cli.StringFlag{ + Name: "influx-username", + EnvVars: []string{"LOTUS_STATS_INFLUX_USERNAME"}, + Usage: "influx username", + Value: "", + }, + &cli.StringFlag{ + Name: "influx-password", + EnvVars: []string{"LOTUS_STATS_INFLUX_PASSWORD"}, + Usage: "influx password", + Value: "", + }, + &cli.IntFlag{ + Name: "height", + EnvVars: []string{"LOTUS_STATS_HEIGHT"}, + Usage: "tipset height to start processing from", + Value: 0, + }, + &cli.IntFlag{ + Name: "head-lag", + EnvVars: []string{"LOTUS_STATS_HEAD_LAG"}, + Usage: "the number of tipsets to delay processing on to smooth chain reorgs", + Value: int(build.MessageConfidence), + }, + &cli.BoolFlag{ + Name: "no-sync", + EnvVars: []string{"LOTUS_STATS_NO_SYNC"}, + Usage: "do not wait for chain sync to complete", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.Background() + + resetFlag := cctx.Bool("reset") + noSyncFlag := cctx.Bool("no-sync") + heightFlag := cctx.Int("height") + headLagFlag := cctx.Int("head-lag") + + influxHostnameFlag := cctx.String("influx-hostname") + influxUsernameFlag := cctx.String("influx-username") + influxPasswordFlag := cctx.String("influx-password") + influxDatabaseFlag := cctx.String("influx-database") + + log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag) + + influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) + if err != nil { + log.Fatal(err) + } + + if resetFlag { + if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil { + log.Fatal(err) + } + } + + height := int64(heightFlag) + 
+ if !resetFlag && height == 0 { + h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag) + if err != nil { + log.Info(err) + } + + height = h + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + if !noSyncFlag { + if err := stats.WaitForSyncComplete(ctx, api); err != nil { + log.Fatal(err) + } + } + + stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag) + + return nil + }, } diff --git a/cmd/lotus-stats/setup.bash b/cmd/lotus-stats/setup.bash index e2812b93a..6510c2fc6 100755 --- a/cmd/lotus-stats/setup.bash +++ b/cmd/lotus-stats/setup.bash @@ -1,10 +1,10 @@ #!/usr/bin/env bash -GRAFANA_HOST="localhost:3000" +GRAFANA_HOST="http://localhost:13000" curl -s -XPOST http://admin:admin@$GRAFANA_HOST/api/datasources -H 'Content-Type: text/json' --data-binary @- > /dev/null << EOF { - "name":"InfluxDB", + "name":"filecoin-ntwk-localstats", "type":"influxdb", "database":"lotus", "url": "http://influxdb:8086", diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go index 949171699..2aea6bda9 100644 --- a/cmd/lotus-storage-miner/actor_test.go +++ b/cmd/lotus-storage-miner/actor_test.go @@ -50,7 +50,7 @@ func TestWorkerKeyChange(t *testing.T) { blocktime := 1 * time.Millisecond - n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner) + n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV2At(1), test.FullNodeWithActorsV2At(1)}, test.OneMiner) client1 := n[0] client2 := n[1] diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index a7fcd722a..008b2ea15 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -433,11 +433,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, return err } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - 
return err - } - mid, err := address.IDFromAddress(a) if err != nil { return xerrors.Errorf("getting id address: %w", err) @@ -451,9 +446,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix)) - smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{ - SealProofType: spt, - }, sectorstorage.SealerConfig{ + smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), sectorstorage.SealerConfig{ ParallelFetchLimit: 10, AllowAddPiece: true, AllowPreCommit1: true, @@ -657,9 +650,14 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, } } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize)) + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) if err != nil { - return address.Undef, err + return address.Undef, xerrors.Errorf("getting network version: %w", err) + } + + spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv) + if err != nil { + return address.Undef, xerrors.Errorf("getting seal proof type: %w", err) } params, err := actors.SerializeParams(&power2.CreateMinerParams{ diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-storage-miner/sealing.go index 49003fc26..ad890129d 100644 --- a/cmd/lotus-storage-miner/sealing.go +++ b/cmd/lotus-storage-miner/sealing.go @@ -28,6 +28,7 @@ var sealingCmd = &cli.Command{ sealingJobsCmd, sealingWorkersCmd, sealingSchedDiagCmd, + sealingAbortCmd, }, } @@ -124,9 +125,13 @@ var sealingWorkersCmd = &cli.Command{ var sealingJobsCmd = &cli.Command{ Name: "jobs", - Usage: "list workers", + Usage: "list running jobs", Flags: []cli.Flag{ &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "show-ret-done", + Usage: "show returned but not consumed calls", + }, }, Action: func(cctx *cli.Context) error { color.NoColor = !cctx.Bool("color") @@ -187,10 +192,17 @@ 
var sealingJobsCmd = &cli.Command{ for _, l := range lines { state := "running" - if l.RunWait > 0 { + switch { + case l.RunWait > 0: state = fmt.Sprintf("assigned(%d)", l.RunWait-1) - } - if l.RunWait == -1 { + case l.RunWait == storiface.RWRetDone: + if !cctx.Bool("show-ret-done") { + continue + } + state = "ret-done" + case l.RunWait == storiface.RWReturned: + state = "returned" + case l.RunWait == storiface.RWRetWait: state = "ret-wait" } dur := "n/a" @@ -198,11 +210,16 @@ var sealingJobsCmd = &cli.Command{ dur = time.Now().Sub(l.Start).Truncate(time.Millisecond * 100).String() } + hostname, ok := workerHostnames[l.wid] + if !ok { + hostname = l.Hostname + } + _, _ = fmt.Fprintf(tw, "%s\t%d\t%s\t%s\t%s\t%s\t%s\n", - hex.EncodeToString(l.ID.ID[10:]), + hex.EncodeToString(l.ID.ID[:4]), l.Sector.Number, - hex.EncodeToString(l.wid[5:]), - workerHostnames[l.wid], + hex.EncodeToString(l.wid[:4]), + hostname, l.Task.Short(), state, dur) @@ -244,3 +261,47 @@ var sealingSchedDiagCmd = &cli.Command{ return nil }, } + +var sealingAbortCmd = &cli.Command{ + Name: "abort", + Usage: "Abort a running job", + ArgsUsage: "[callid]", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + jobs, err := nodeApi.WorkerJobs(ctx) + if err != nil { + return xerrors.Errorf("getting worker jobs: %w", err) + } + + var job *storiface.WorkerJob + outer: + for _, workerJobs := range jobs { + for _, j := range workerJobs { + if strings.HasPrefix(j.ID.ID.String(), cctx.Args().First()) { + j := j + job = &j + break outer + } + } + } + + if job == nil { + return xerrors.Errorf("job with specified id prefix not found") + } + + fmt.Printf("aborting job %s, task %s, sector %d, running on host %s\n", job.ID.String(), job.Task.Short(), job.Sector.Number, job.Hostname) + + return 
nodeApi.SealingAbort(ctx, job.ID) + }, +} diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index e2e94cf69..37eb06284 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -210,7 +210,7 @@ var sectorsListCmd = &cli.Command{ if err != nil { return err } - commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) + commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset)) for _, info := range sset { commitedIDs[info.SectorNumber] = struct{}{} } diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go index 8b960a4bf..e6986f8c7 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-storage-miner/storage.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -20,11 +21,14 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/lib/tablewriter" ) const metaFile = "sectorstore.json" @@ -40,6 +44,7 @@ stored while moving through the sealing pipeline (references as 'seal').`, storageAttachCmd, storageListCmd, storageFindCmd, + storageCleanupCmd, }, } @@ -147,6 +152,9 @@ var storageListCmd = &cli.Command{ Flags: []cli.Flag{ &cli.BoolFlag{Name: "color"}, }, + Subcommands: []*cli.Command{ + storageListSectorsCmd, + }, Action: func(cctx *cli.Context) error { color.NoColor = !cctx.Bool("color") @@ -408,3 +416,263 @@ var storageFindCmd = &cli.Command{ return nil }, } + +var storageListSectorsCmd = &cli.Command{ + Name: "sectors", + Usage: "get 
list of all sector files", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "color", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + color.NoColor = !cctx.Bool("color") + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + napi, closer2, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer2() + + ctx := lcli.ReqContext(cctx) + + sectors, err := nodeApi.SectorsList(ctx) + if err != nil { + return xerrors.Errorf("listing sectors: %w", err) + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + aid, err := address.IDFromAddress(maddr) + if err != nil { + return err + } + + mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + sid := func(sn abi.SectorNumber) abi.SectorID { + return abi.SectorID{ + Miner: abi.ActorID(aid), + Number: sn, + } + } + + type entry struct { + id abi.SectorNumber + storage stores.ID + ft storiface.SectorFileType + urls string + + primary, seal, store bool + + state api.SectorState + } + + var list []entry + + for _, sector := range sectors { + st, err := nodeApi.SectorsStatus(ctx, sector, false) + if err != nil { + return xerrors.Errorf("getting sector status for sector %d: %w", sector, err) + } + + for _, ft := range storiface.PathTypes { + si, err := nodeApi.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false) + if err != nil { + return xerrors.Errorf("find sector %d: %w", sector, err) + } + + for _, info := range si { + + list = append(list, entry{ + id: sector, + storage: info.ID, + ft: ft, + urls: strings.Join(info.URLs, ";"), + + primary: info.Primary, + seal: info.CanSeal, + store: info.CanStore, + + state: st.State, + }) + } + } + + } + + sort.Slice(list, func(i, j int) bool { + if list[i].store != list[j].store { + return list[i].store + } + + if list[i].storage != list[j].storage { + return list[i].storage < list[j].storage + } + + if 
list[i].id != list[j].id { + return list[i].id < list[j].id + } + + return list[i].ft < list[j].ft + }) + + tw := tablewriter.New( + tablewriter.Col("Storage"), + tablewriter.Col("Sector"), + tablewriter.Col("Type"), + tablewriter.Col("State"), + tablewriter.Col("Primary"), + tablewriter.Col("Path use"), + tablewriter.Col("URLs"), + ) + + if len(list) == 0 { + return nil + } + + lastS := list[0].storage + sc1, sc2 := color.FgBlue, color.FgCyan + + for _, e := range list { + if e.storage != lastS { + lastS = e.storage + sc1, sc2 = sc2, sc1 + } + + m := map[string]interface{}{ + "Storage": color.New(sc1).Sprint(e.storage), + "Sector": e.id, + "Type": e.ft.String(), + "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state), + "Primary": maybeStr(e.seal, color.FgGreen, "primary"), + "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"), + "URLs": e.urls, + } + tw.Write(m) + } + + return tw.Flush(os.Stdout) + }, +} + +func maybeStr(c bool, col color.Attribute, s string) string { + if !c { + return "" + } + + return color.New(col).Sprint(s) +} + +var storageCleanupCmd = &cli.Command{ + Name: "cleanup", + Usage: "trigger cleanup actions", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "removed", + Usage: "cleanup remaining files from removed sectors", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + napi, closer2, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer2() + + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("removed") { + if err := cleanupRemovedSectorData(ctx, api, napi); err != nil { + return err + } + } + + // TODO: proving sectors in sealing storage + + return nil + }, +} + +func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi api.FullNode) error { + sectors, err := api.SectorsList(ctx) + if err != nil { + return 
err + } + + maddr, err := api.ActorAddress(ctx) + if err != nil { + return err + } + + aid, err := address.IDFromAddress(maddr) + if err != nil { + return err + } + + sid := func(sn abi.SectorNumber) abi.SectorID { + return abi.SectorID{ + Miner: abi.ActorID(aid), + Number: sn, + } + } + + mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + toRemove := map[abi.SectorNumber]struct{}{} + + for _, sector := range sectors { + st, err := api.SectorsStatus(ctx, sector, false) + if err != nil { + return xerrors.Errorf("getting sector status for sector %d: %w", sector, err) + } + + if sealing.SectorState(st.State) != sealing.Removed { + continue + } + + for _, ft := range storiface.PathTypes { + si, err := api.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false) + if err != nil { + return xerrors.Errorf("find sector %d: %w", sector, err) + } + + if len(si) > 0 { + toRemove[sector] = struct{}{} + } + } + } + + for sn := range toRemove { + fmt.Printf("cleaning up data for sector %d\n", sn) + err := api.SectorRemove(ctx, sn) + if err != nil { + log.Error(err) + } + } + + return nil +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 4ff63be11..1d13b4082 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -16,6 +16,7 @@ import ( "strings" paramfetch "github.com/filecoin-project/go-paramfetch" + metricsprom "github.com/ipfs/go-metrics-prometheus" "github.com/mitchellh/go-homedir" "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" @@ -35,7 +36,6 @@ import ( lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" @@ -262,8 +262,14 @@ var DaemonCmd = &cli.Command{ liteModeDeps = 
node.Override(new(api.GatewayAPI), gapi) } - var api api.FullNode + // some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore + // use ipfs/go-metrics-interface. This injects a Prometheus exporter + // for those. Metrics are exported to the default registry. + if err := metricsprom.Inject(); err != nil { + log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err) + } + var api api.FullNode stop, err := node.New(ctx, node.FullAPI(&api, node.Lite(isLite)), @@ -399,9 +405,9 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { } defer lr.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(repo.BlockstoreChain) if err != nil { - return err + return xerrors.Errorf("failed to open blockstore: %w", err) } mds, err := lr.Datastore("/metadata") @@ -409,13 +415,13 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { return err } - bs := blockstore.NewBlockstore(ds) - j, err := journal.OpenFSJournal(lr, journal.EnvDisabledEvents()) if err != nil { return xerrors.Errorf("failed to open journal: %w", err) } - cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j) + + cst := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j) + defer cst.Close() //nolint:errcheck log.Infof("importing chain from %s...", fname) @@ -460,7 +466,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { } log.Infof("accepting %s as new head", ts.Cids()) - if err := cst.SetHead(ts); err != nil { + if err := cst.ForceHeadSilent(context.Background(), ts); err != nil { return err } diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go index 4f68ac85a..f2c59b615 100644 --- a/cmd/lotus/rpc.go +++ b/cmd/lotus/rpc.go @@ -14,6 +14,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + promclient 
"github.com/prometheus/client_golang/prometheus" "go.opencensus.io/tag" "golang.org/x/xerrors" @@ -49,7 +50,16 @@ func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shut http.Handle("/rest/v0/import", importAH) + // Prometheus globals are exposed as interfaces, but the prometheus + // OpenCensus exporter expects a concrete *Registry. The concrete type of + // the globals are actually *Registry, so we downcast them, staying + // defensive in case things change under the hood. + registry, ok := promclient.DefaultRegisterer.(*promclient.Registry) + if !ok { + log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", promclient.DefaultRegisterer) + } exporter, err := prometheus.NewExporter(prometheus.Options{ + Registry: registry, Namespace: "lotus", }) if err != nil { diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go index cdda1db83..f5a94989d 100644 --- a/conformance/chaos/actor.go +++ b/conformance/chaos/actor.go @@ -73,6 +73,8 @@ const ( // MethodInspectRuntime is the identifier for the method that returns the // current runtime values. MethodInspectRuntime + // MethodCreateState is the identifier for the method that creates the chaos actor's state. + MethodCreateState ) // Exports defines the methods this actor exposes publicly. @@ -87,6 +89,7 @@ func (a Actor) Exports() []interface{} { MethodMutateState: a.MutateState, MethodAbortWith: a.AbortWith, MethodInspectRuntime: a.InspectRuntime, + MethodCreateState: a.CreateState, } } @@ -227,6 +230,14 @@ type MutateStateArgs struct { Branch MutateStateBranch } +// CreateState creates the chaos actor's state +func (a Actor) CreateState(rt runtime2.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + rt.StateCreate(&State{}) + + return nil +} + // MutateState attempts to mutate a state value in the actor. 
func (a Actor) MutateState(rt runtime2.Runtime, args *MutateStateArgs) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go index dbce4f4c5..e68b9a4df 100644 --- a/conformance/chaos/actor_test.go +++ b/conformance/chaos/actor_test.go @@ -129,8 +129,9 @@ func TestMutateStateInTransaction(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" rt.Call(a.MutateState, &MutateStateArgs{ Value: val, @@ -155,23 +156,30 @@ func TestMutateStateAfterTransaction(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" + defer func() { + if r := recover(); r == nil { + t.Fatal("The code did not panic") + } else { + var st State + rt.GetState(&st) + + // state should be updated successfully _in_ the transaction but not outside + if st.Value != val+"-in" { + t.Fatal("state was not updated") + } + + rt.Verify() + } + }() rt.Call(a.MutateState, &MutateStateArgs{ Value: val, Branch: MutateAfterTransaction, }) - var st State - rt.GetState(&st) - - // state should be updated successfully _in_ the transaction but not outside - if st.Value != val+"-in" { - t.Fatal("state was not updated") - } - - rt.Verify() } func TestMutateStateReadonly(t *testing.T) { @@ -182,22 +190,30 @@ func TestMutateStateReadonly(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" + defer func() { + if r := recover(); r == nil { + t.Fatal("The code did not panic") + } else { + var st State + rt.GetState(&st) + + if st.Value != "" { + t.Fatal("state was not expected to be updated") + } + + rt.Verify() + } + }() + rt.Call(a.MutateState, &MutateStateArgs{ Value: val, Branch: MutateReadonly, }) - 
var st State - rt.GetState(&st) - - if st.Value != "" { - t.Fatal("state was not expected to be updated") - } - - rt.Verify() } func TestMutateStateInvalidBranch(t *testing.T) { @@ -254,11 +270,13 @@ func TestInspectRuntime(t *testing.T) { receiver := atesting2.NewIDAddr(t, 101) builder := mock2.NewBuilder(context.Background(), receiver) - rt := builder.Build(t) - rt.SetCaller(caller, builtin2.AccountActorCodeID) - rt.StateCreate(&State{}) var a Actor + rt := builder.Build(t) + rt.ExpectValidateCallerAny() + rt.Call(a.CreateState, nil) + + rt.SetCaller(caller, builtin2.AccountActorCodeID) rt.ExpectValidateCallerAny() ret := rt.Call(a.InspectRuntime, abi.Empty) rtr, ok := ret.(*InspectRuntimeReturn) diff --git a/conformance/driver.go b/conformance/driver.go index 95b6f2659..833d50d7b 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -87,10 +87,12 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot syscalls = vm.Syscalls(ffiwrapper.ProofVerifier) vmRand = NewFixedRand() - cs = store.NewChainStore(bs, ds, syscalls, nil) + cs = store.NewChainStore(bs, bs, ds, syscalls, nil) sm = stmgr.NewStateManager(cs) ) + defer cs.Close() //nolint:errcheck + blocks := make([]store.BlockMessages, 0, len(tipset.Blocks)) for _, b := range tipset.Blocks { sb := store.BlockMessages{ diff --git a/documentation/en/api-methods-miner.md b/documentation/en/api-methods-miner.md new file mode 100644 index 000000000..09c340c3a --- /dev/null +++ b/documentation/en/api-methods-miner.md @@ -0,0 +1,1820 @@ +# Groups +* [](#) + * [Closing](#Closing) + * [Session](#Session) + * [Shutdown](#Shutdown) + * [Version](#Version) +* [Actor](#Actor) + * [ActorAddress](#ActorAddress) + * [ActorSectorSize](#ActorSectorSize) +* [Auth](#Auth) + * [AuthNew](#AuthNew) + * [AuthVerify](#AuthVerify) +* [Create](#Create) + * [CreateBackup](#CreateBackup) +* [Deals](#Deals) + * [DealsConsiderOfflineRetrievalDeals](#DealsConsiderOfflineRetrievalDeals) + * 
[DealsConsiderOfflineStorageDeals](#DealsConsiderOfflineStorageDeals) + * [DealsConsiderOnlineRetrievalDeals](#DealsConsiderOnlineRetrievalDeals) + * [DealsConsiderOnlineStorageDeals](#DealsConsiderOnlineStorageDeals) + * [DealsImportData](#DealsImportData) + * [DealsList](#DealsList) + * [DealsPieceCidBlocklist](#DealsPieceCidBlocklist) + * [DealsSetConsiderOfflineRetrievalDeals](#DealsSetConsiderOfflineRetrievalDeals) + * [DealsSetConsiderOfflineStorageDeals](#DealsSetConsiderOfflineStorageDeals) + * [DealsSetConsiderOnlineRetrievalDeals](#DealsSetConsiderOnlineRetrievalDeals) + * [DealsSetConsiderOnlineStorageDeals](#DealsSetConsiderOnlineStorageDeals) + * [DealsSetPieceCidBlocklist](#DealsSetPieceCidBlocklist) +* [I](#I) + * [ID](#ID) +* [Log](#Log) + * [LogList](#LogList) + * [LogSetLevel](#LogSetLevel) +* [Market](#Market) + * [MarketCancelDataTransfer](#MarketCancelDataTransfer) + * [MarketDataTransferUpdates](#MarketDataTransferUpdates) + * [MarketGetAsk](#MarketGetAsk) + * [MarketGetDealUpdates](#MarketGetDealUpdates) + * [MarketGetRetrievalAsk](#MarketGetRetrievalAsk) + * [MarketImportDealData](#MarketImportDealData) + * [MarketListDataTransfers](#MarketListDataTransfers) + * [MarketListDeals](#MarketListDeals) + * [MarketListIncompleteDeals](#MarketListIncompleteDeals) + * [MarketListRetrievalDeals](#MarketListRetrievalDeals) + * [MarketRestartDataTransfer](#MarketRestartDataTransfer) + * [MarketSetAsk](#MarketSetAsk) + * [MarketSetRetrievalAsk](#MarketSetRetrievalAsk) +* [Mining](#Mining) + * [MiningBase](#MiningBase) +* [Net](#Net) + * [NetAddrsListen](#NetAddrsListen) + * [NetAgentVersion](#NetAgentVersion) + * [NetAutoNatStatus](#NetAutoNatStatus) + * [NetBandwidthStats](#NetBandwidthStats) + * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) + * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) + * [NetConnect](#NetConnect) + * [NetConnectedness](#NetConnectedness) + * [NetDisconnect](#NetDisconnect) + * [NetFindPeer](#NetFindPeer) + * 
[NetPeers](#NetPeers) + * [NetPubsubScores](#NetPubsubScores) +* [Pieces](#Pieces) + * [PiecesGetCIDInfo](#PiecesGetCIDInfo) + * [PiecesGetPieceInfo](#PiecesGetPieceInfo) + * [PiecesListCidInfos](#PiecesListCidInfos) + * [PiecesListPieces](#PiecesListPieces) +* [Pledge](#Pledge) + * [PledgeSector](#PledgeSector) +* [Return](#Return) + * [ReturnAddPiece](#ReturnAddPiece) + * [ReturnFetch](#ReturnFetch) + * [ReturnFinalizeSector](#ReturnFinalizeSector) + * [ReturnMoveStorage](#ReturnMoveStorage) + * [ReturnReadPiece](#ReturnReadPiece) + * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed) + * [ReturnSealCommit1](#ReturnSealCommit1) + * [ReturnSealCommit2](#ReturnSealCommit2) + * [ReturnSealPreCommit1](#ReturnSealPreCommit1) + * [ReturnSealPreCommit2](#ReturnSealPreCommit2) + * [ReturnUnsealPiece](#ReturnUnsealPiece) +* [Sealing](#Sealing) + * [SealingAbort](#SealingAbort) + * [SealingSchedDiag](#SealingSchedDiag) +* [Sector](#Sector) + * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) + * [SectorGetSealDelay](#SectorGetSealDelay) + * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorRemove](#SectorRemove) + * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) + * [SectorSetSealDelay](#SectorSetSealDelay) + * [SectorStartSealing](#SectorStartSealing) +* [Sectors](#Sectors) + * [SectorsList](#SectorsList) + * [SectorsRefs](#SectorsRefs) + * [SectorsStatus](#SectorsStatus) + * [SectorsUpdate](#SectorsUpdate) +* [Storage](#Storage) + * [StorageAddLocal](#StorageAddLocal) + * [StorageAttach](#StorageAttach) + * [StorageBestAlloc](#StorageBestAlloc) + * [StorageDeclareSector](#StorageDeclareSector) + * [StorageDropSector](#StorageDropSector) + * [StorageFindSector](#StorageFindSector) + * [StorageInfo](#StorageInfo) + * [StorageList](#StorageList) + * [StorageLocal](#StorageLocal) + * [StorageLock](#StorageLock) + * [StorageReportHealth](#StorageReportHealth) + * [StorageStat](#StorageStat) + * [StorageTryLock](#StorageTryLock) +* 
[Worker](#Worker) + * [WorkerConnect](#WorkerConnect) + * [WorkerJobs](#WorkerJobs) + * [WorkerStats](#WorkerStats) +## + + +### Closing + + +Perms: read + +Inputs: `null` + +Response: `{}` + +### Session + + +Perms: read + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +### Shutdown + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### Version + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 65536, + "BlockDelay": 42 +} +``` + +## Actor + + +### ActorAddress +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `"f01234"` + +### ActorSectorSize +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `34359738368` + +## Auth + + +### AuthNew + + +Perms: admin + +Inputs: +```json +[ + null +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### AuthVerify + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `null` + +## Create + + +### CreateBackup +CreateBackup creates node backup onder the specified file name. The +method requires that the lotus-miner is running with the +LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that +the path specified when calling CreateBackup is within the base path + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## Deals + + +### DealsConsiderOfflineRetrievalDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `true` + +### DealsConsiderOfflineStorageDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `true` + +### DealsConsiderOnlineRetrievalDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `true` + +### DealsConsiderOnlineStorageDeals +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: `null` + +Response: `true` + +### DealsImportData +There are not yet any comments for this method. + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### DealsList +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### DealsPieceCidBlocklist +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### DealsSetConsiderOfflineRetrievalDeals +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOfflineStorageDeals +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineRetrievalDeals +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineStorageDeals +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetPieceCidBlocklist +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: +```json +[ + null +] +``` + +Response: `{}` + +## I + + +### ID + + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +## Log + + +### LogList + + +Perms: write + +Inputs: `null` + +Response: `null` + +### LogSetLevel + + +Perms: write + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +## Market + + +### MarketCancelDataTransfer +ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: read + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### MarketDataTransferUpdates +There are not yet any comments for this method. + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42 +} +``` + +### MarketGetAsk +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Ask": { + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +} +``` + +### MarketGetDealUpdates +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": ".lotusminer/fstmp123", + "MetadataPath": ".lotusminer/fstmp123", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "StoreID": 12, + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + } +} +``` + +### MarketGetRetrievalAsk +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 +} +``` + +### MarketImportDealData +There are not yet any comments for this method. 
+ +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### MarketListDataTransfers +There are not yet any comments for this method. + +Perms: write + +Inputs: `null` + +Response: `null` + +### MarketListDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketListIncompleteDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketListRetrievalDeals +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketRestartDataTransfer +MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: read + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### MarketSetAsk +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + "0", + "0", + 10101, + 1032, + 1032 +] +``` + +Response: `{}` + +### MarketSetRetrievalAsk +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + { + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 + } +] +``` + +Response: `{}` + +## Mining + + +### MiningBase +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +## Net + + +### NetAddrsListen + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetConnect + + +Perms: write + +Inputs: +```json +[ + { + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +} +``` + +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: `null` + +### NetPubsubScores 
+ + +Perms: read + +Inputs: `null` + +Response: `null` + +## Pieces + + +### PiecesGetCIDInfo +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceBlockLocations": null +} +``` + +### PiecesGetPieceInfo +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Deals": null +} +``` + +### PiecesListCidInfos +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### PiecesListPieces +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +## Pledge + + +### PledgeSector +Temp api for testing + + +Perms: write + +Inputs: `null` + +Response: `{}` + +## Return + + +### ReturnAddPiece + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnFetch + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnFinalizeSector + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + 
+### ReturnMoveStorage + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnReadPiece + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + true, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnReleaseUnsealed + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealPreCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealPreCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Unsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Sealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnUnsealPiece + + +Perms: 
admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +## Sealing + + +### SealingAbort +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + } +] +``` + +Response: `{}` + +### SealingSchedDiag +SealingSchedDiag dumps internal sealing scheduler state + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +## Sector + + +### SectorGetExpectedSealDuration +SectorGetExpectedSealDuration gets the expected time for a sector to seal + + +Perms: read + +Inputs: `null` + +Response: `60000000000` + +### SectorGetSealDelay +SectorGetSealDelay gets the time that a newly-created sector +waits for more deals before it starts sealing + + +Perms: read + +Inputs: `null` + +Response: `60000000000` + +### SectorMarkForUpgrade +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorRemove +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorSetExpectedSealDuration +SectorSetExpectedSealDuration sets the expected time for a sector to seal + + +Perms: write + +Inputs: +```json +[ + 60000000000 +] +``` + +Response: `{}` + +### SectorSetSealDelay +SectorSetSealDelay sets the time that a newly-created sector +waits for more deals before it starts sealing + + +Perms: write + +Inputs: +```json +[ + 60000000000 +] +``` + +Response: `{}` + +### SectorStartSealing +SectorStartSealing can be called on sectors in Empty or WaitDeals states +to trigger sealing early + + +Perms: write + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +## Sectors + + +### SectorsList +List all staged sectors + + +Perms: read + +Inputs: `null` + +Response: `null` + +### SectorsRefs +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "98000": [ + { + "SectorID": 100, + "Offset": 10485760, + "Size": 1048576 + } + ] +} +``` + +### SectorsStatus +Get the status of a given sector by ID + + +Perms: read + +Inputs: +```json +[ + 9, + true +] +``` + +Response: +```json +{ + "SectorID": 9, + "State": "Proving", + "CommD": null, + "CommR": null, + "Proof": "Ynl0ZSBhcnJheQ==", + "Deals": null, + "Ticket": { + "Value": null, + "Epoch": 10101 + }, + "Seed": { + "Value": null, + "Epoch": 10101 + }, + "PreCommitMsg": null, + "CommitMsg": null, + "Retries": 42, + "ToUpgrade": true, + "LastErr": "string value", + "Log": null, + "SealProof": 8, + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "OnTime": 10101, + "Early": 10101 +} +``` + +### SectorsUpdate +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + 9, + "Proving" +] +``` + +Response: `{}` + +## Storage + + +### StorageAddLocal +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### StorageAttach + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": null, + "Weight": 42, + "CanSeal": true, + "CanStore": true + }, + { + "Capacity": 9, + "Available": 9, + "Reserved": 9 + } +] +``` + +Response: `{}` + +### StorageBestAlloc + + +Perms: admin + +Inputs: +```json +[ + 1, + 34359738368, + "sealing" +] +``` + +Response: `null` + +### StorageDeclareSector + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Miner": 1000, + "Number": 9 + }, + 1, + true +] +``` + +Response: `{}` + +### StorageDropSector + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Miner": 1000, + "Number": 9 + }, + 1 +] +``` + +Response: `{}` + +### StorageFindSector + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 34359738368, + true +] +``` + +Response: `null` + +### StorageInfo + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" +] +``` + +Response: +```json +{ + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": null, + "Weight": 42, + "CanSeal": true, + "CanStore": true +} +``` + +### StorageList +There are not yet any comments for this method. + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [ + { + "Miner": 1000, + "Number": 100, + "SectorFileType": 2 + } + ] +} +``` + +### StorageLocal +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path" +} +``` + +### StorageLock + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 1 +] +``` + +Response: `{}` + +### StorageReportHealth + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Stat": { + "Capacity": 9, + "Available": 9, + "Reserved": 9 + }, + "Err": "string value" + } +] +``` + +Response: `{}` + +### StorageStat +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" +] +``` + +Response: +```json +{ + "Capacity": 9, + "Available": 9, + "Reserved": 9 +} +``` + +### StorageTryLock + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 1 +] +``` + +Response: `true` + +## Worker + + +### WorkerConnect +WorkerConnect tells the node to connect to workers RPC + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### WorkerJobs +There are not yet any comments for this method. + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "ef8d99a2-6865-4189-8ffa-9fef0f806eee": [ + { + "ID": { + "Sector": { + "Miner": 1000, + "Number": 100 + }, + "ID": "76081ba0-61bd-45a5-bc08-af05f1c26e5d" + }, + "Sector": { + "Miner": 1000, + "Number": 100 + }, + "Task": "seal/v0/precommit/2", + "RunWait": 0, + "Start": "2020-11-12T09:22:07Z", + "Hostname": "host" + } + ] +} +``` + +### WorkerStats +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "ef8d99a2-6865-4189-8ffa-9fef0f806eee": { + "Info": { + "Hostname": "host", + "Resources": { + "MemPhysical": 274877906944, + "MemSwap": 128849018880, + "MemReserved": 2147483648, + "CPUs": 64, + "GPUs": [ + "aGPU 1337" + ] + } + }, + "Enabled": true, + "MemUsedMin": 0, + "MemUsedMax": 0, + "GpuUsed": false, + "CpuUse": 0 + } +} +``` + diff --git a/documentation/en/api-methods-worker.md b/documentation/en/api-methods-worker.md new file mode 100644 index 000000000..7e4fe5e9c --- /dev/null +++ b/documentation/en/api-methods-worker.md @@ -0,0 +1,570 @@ +# Groups +* [](#) + * [Enabled](#Enabled) + * [Fetch](#Fetch) + * [Info](#Info) + * [Paths](#Paths) + * [Remove](#Remove) + * [Session](#Session) + * [Version](#Version) +* [Add](#Add) + * [AddPiece](#AddPiece) +* [Finalize](#Finalize) + * [FinalizeSector](#FinalizeSector) +* [Move](#Move) + * [MoveStorage](#MoveStorage) +* [Process](#Process) + * [ProcessSession](#ProcessSession) +* [Read](#Read) + * [ReadPiece](#ReadPiece) +* [Release](#Release) + * [ReleaseUnsealed](#ReleaseUnsealed) +* [Seal](#Seal) + * [SealCommit1](#SealCommit1) + * [SealCommit2](#SealCommit2) + * [SealPreCommit1](#SealPreCommit1) + * [SealPreCommit2](#SealPreCommit2) +* [Set](#Set) + * [SetEnabled](#SetEnabled) +* [Storage](#Storage) + * [StorageAddLocal](#StorageAddLocal) +* [Task](#Task) + * [TaskTypes](#TaskTypes) +* [Unseal](#Unseal) + * [UnsealPiece](#UnsealPiece) +* [Wait](#Wait) + * [WaitQuiet](#WaitQuiet) +## + + +### Enabled +There are not yet any comments for this method. + +Perms: admin + +Inputs: `null` + +Response: `true` + +### Fetch + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1, + "sealing", + "move" +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### Info +There are not yet any comments for this method. 
+ +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "Hostname": "string value", + "Resources": { + "MemPhysical": 42, + "MemSwap": 42, + "MemReserved": 42, + "CPUs": 42, + "GPUs": null + } +} +``` + +### Paths +There are not yet any comments for this method. + +Perms: admin + +Inputs: `null` + +Response: `null` + +### Remove +Storage / Other + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` + +Response: `{}` + +### Session +Like ProcessSession, but returns an error when worker is disabled + + +Perms: admin + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +### Version +TODO: Info() (name, ...) ? + + +Perms: admin + +Inputs: `null` + +Response: `65536` + +## Add + + +### AddPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + 1024, + {} +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Finalize + + +### FinalizeSector + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Move + + +### MoveStorage + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1 +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Process + + +### ProcessSession +returns a random UUID of worker session, generated randomly when worker +process starts + + +Perms: admin + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +## Read + + +### ReadPiece + + +Perms: admin + +Inputs: +```json +[ + {}, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024 
+] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Release + + +### ReleaseUnsealed + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Seal + + +### SealCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + null, + null, + { + "Unsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Sealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealPreCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealPreCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Set + + +### SetEnabled +SetEnabled marks the worker as enabled/disabled. 
Not that this setting +may take a few seconds to propagate to task scheduler + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +## Storage + + +### StorageAddLocal +There are not yet any comments for this method. + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## Task + + +### TaskTypes +TaskType -> Weight + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "seal/v0/precommit/2": {} +} +``` + +## Unseal + + +### UnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024, + null, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Wait + + +### WaitQuiet +WaitQuiet blocks until there are no tasks running + + +Perms: admin + +Inputs: `null` + +Response: `{}` + diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index 4ab89d688..8db940007 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -68,7 +68,8 @@ * [LogList](#LogList) * [LogSetLevel](#LogSetLevel) * [Market](#Market) - * [MarketEnsureAvailable](#MarketEnsureAvailable) + * [MarketReleaseFunds](#MarketReleaseFunds) + * [MarketReserveFunds](#MarketReserveFunds) * [Miner](#Miner) * [MinerCreateBlock](#MinerCreateBlock) * [MinerGetBaseInfo](#MinerGetBaseInfo) @@ -245,7 +246,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 4352, + "APIVersion": 65536, "BlockDelay": 42 } ``` @@ -1612,8 +1613,24 @@ Response: `{}` ## Market -### MarketEnsureAvailable -MarketFreeBalance +### MarketReleaseFunds +MarketReleaseFunds releases funds reserved by MarketReserveFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0" +] +``` + +Response: `{}` + +### MarketReserveFunds +MarketReserveFunds reserves funds for a deal Perms: 
sign @@ -3857,7 +3874,7 @@ Response: "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Multiaddrs": null, - "SealProofType": 3, + "SealProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, "ConsensusFaultElapsed": 10101 @@ -3875,7 +3892,7 @@ Inputs: [ "f01234", { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -3972,7 +3989,7 @@ Inputs: [ "f01234", { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -4177,7 +4194,7 @@ Inputs: ] ``` -Response: `6` +Response: `8` ### StateReadState StateReadState returns the indicated actor's state. @@ -4398,7 +4415,7 @@ Response: ```json { "SectorNumber": 9, - "SealProof": 3, + "SealProof": 8, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, @@ -4469,7 +4486,7 @@ Response: ```json { "Info": { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" diff --git a/documentation/en/architecture/architecture.md b/documentation/en/architecture/architecture.md index 61cd117bb..5a9eee3c2 100644 --- a/documentation/en/architecture/architecture.md +++ b/documentation/en/architecture/architecture.md @@ -311,7 +311,7 @@ FIXME: Maybe mention the `Batching` interface as the developer will stumble upon FIXME: IPFS blocks vs Filecoin blocks ideally happens before this / here -The [`Blockstore` interface](`github.com/ipfs/go-ipfs-blockstore/blockstore.go`) structures the key-value pair +The [`Blockstore` interface](`github.com/filecoin-project/lotus/lib/blockstore.go`) structures the key-value pair into the CID format for the key and the [`Block` interface](`github.com/ipfs/go-block-format/blocks.go`) for the value. 
The `Block` value is just a raw string of bytes addressed by its hash, which is included in the CID key. diff --git a/extern/blst b/extern/blst new file mode 160000 index 000000000..1cbb16ed9 --- /dev/null +++ b/extern/blst @@ -0,0 +1 @@ +Subproject commit 1cbb16ed9580dcd3e9593b71221fcf2a048faaef diff --git a/extern/fil-blst b/extern/fil-blst deleted file mode 160000 index 5f93488fc..000000000 --- a/extern/fil-blst +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 5f93488fc0dbfb450f2355269f18fc67010d59bb diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 0226d0be6..1d9cb3e8f 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 0226d0be6f0ec441e687512cd833040414437351 +Subproject commit 1d9cb3e8ff53f51f9318fc57e5d00bc79bdc0128 diff --git a/extern/sector-storage/cbor_gen.go b/extern/sector-storage/cbor_gen.go index 0db97f2c9..c51bae1a6 100644 --- a/extern/sector-storage/cbor_gen.go +++ b/extern/sector-storage/cbor_gen.go @@ -199,7 +199,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{164}); err != nil { + if _, err := w.Write([]byte{166}); err != nil { return err } @@ -282,6 +282,51 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if _, err := io.WriteString(w, string(t.WorkError)); err != nil { return err } + + // t.WorkerHostname (string) (string) + if len("WorkerHostname") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WorkerHostname\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerHostname"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WorkerHostname")); err != nil { + return err + } + + if len(t.WorkerHostname) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.WorkerHostname was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkerHostname))); err != nil { + 
return err + } + if _, err := io.WriteString(w, string(t.WorkerHostname)); err != nil { + return err + } + + // t.StartTime (int64) (int64) + if len("StartTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartTime")); err != nil { + return err + } + + if t.StartTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartTime-1)); err != nil { + return err + } + } return nil } @@ -360,6 +405,43 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error { t.WorkError = string(sval) } + // t.WorkerHostname (string) (string) + case "WorkerHostname": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.WorkerHostname = string(sval) + } + // t.StartTime (int64) (int64) + case "StartTime": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartTime = int64(extraI) + } default: return fmt.Errorf("unknown struct field %d: '%s'", i, name) diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go index c4e1364ad..6f0dcfa13 100644 --- a/extern/sector-storage/faults.go +++ b/extern/sector-storage/faults.go @@ -9,17 +9,18 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) // FaultTracker TODO: Track things more actively type FaultTracker interface { - CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) } // CheckProvable returns unprovable sectors -func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) { +func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) { var bad []abi.SectorID ssize, err := pp.SectorSize() @@ -33,27 +34,27 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ctx, cancel := context.WithCancel(ctx) defer cancel() - locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone) + locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone) if err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } if !locked { log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed") - bad = append(bad, sector) + bad = append(bad, sector.ID) return nil } - lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) - bad = append(bad, sector) + bad = append(bad, sector.ID) return nil } if lp.Sealed == "" || lp.Cache == "" { 
log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) - bad = append(bad, sector) + bad = append(bad, sector.ID) return nil } @@ -69,14 +70,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, st, err := os.Stat(p) if err != nil { log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err) - bad = append(bad, sector) + bad = append(bad, sector.ID) return nil } if sz != 0 { if st.Size() != int64(ssize)*sz { log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) - bad = append(bad, sector) + bad = append(bad, sector.ID) return nil } } diff --git a/extern/sector-storage/ffiwrapper/basicfs/fs.go b/extern/sector-storage/ffiwrapper/basicfs/fs.go index 7ae303d9c..a833f728c 100644 --- a/extern/sector-storage/ffiwrapper/basicfs/fs.go +++ b/extern/sector-storage/ffiwrapper/basicfs/fs.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -23,7 +24,7 @@ type Provider struct { waitSector map[sectorFile]chan struct{} } -func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { +func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint return storiface.SectorPaths{}, nil, err } @@ 
-37,7 +38,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing done := func() {} out := storiface.SectorPaths{ - ID: id, + ID: id.ID, } for _, fileType := range storiface.PathTypes { @@ -49,10 +50,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing if b.waitSector == nil { b.waitSector = map[sectorFile]chan struct{}{} } - ch, found := b.waitSector[sectorFile{id, fileType}] + ch, found := b.waitSector[sectorFile{id.ID, fileType}] if !found { ch = make(chan struct{}, 1) - b.waitSector[sectorFile{id, fileType}] = ch + b.waitSector[sectorFile{id.ID, fileType}] = ch } b.lk.Unlock() @@ -63,7 +64,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing return storiface.SectorPaths{}, nil, ctx.Err() } - path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id)) + path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID)) prevDone := done done = func() { diff --git a/extern/sector-storage/ffiwrapper/config.go b/extern/sector-storage/ffiwrapper/config.go deleted file mode 100644 index ca32b1191..000000000 --- a/extern/sector-storage/ffiwrapper/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package ffiwrapper - -import ( - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" -) - -type Config struct { - SealProofType abi.RegisteredSealProof - - _ struct{} // guard against nameless init -} - -func sizeFromConfig(cfg Config) (abi.SectorSize, error) { - return cfg.SealProofType.SectorSize() -} - -func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) { - switch ssize { - case 2 << 10: - return abi.RegisteredSealProof_StackedDrg2KiBV1, nil - case 8 << 20: - return abi.RegisteredSealProof_StackedDrg8MiBV1, nil - case 512 << 20: - return abi.RegisteredSealProof_StackedDrg512MiBV1, nil - case 32 << 30: - return abi.RegisteredSealProof_StackedDrg32GiBV1, nil - case 64 << 30: - return 
abi.RegisteredSealProof_StackedDrg64GiBV1, nil - default: - return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) - } -} diff --git a/extern/sector-storage/ffiwrapper/sealer.go b/extern/sector-storage/ffiwrapper/sealer.go index c1b558d9a..39cb8fa1b 100644 --- a/extern/sector-storage/ffiwrapper/sealer.go +++ b/extern/sector-storage/ffiwrapper/sealer.go @@ -1,16 +1,12 @@ package ffiwrapper import ( - "github.com/filecoin-project/go-state-types/abi" logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("ffiwrapper") type Sealer struct { - sealProofType abi.RegisteredSealProof - ssize abi.SectorSize // a function of sealProofType and postProofType - sectors SectorProvider stopping chan struct{} } @@ -18,11 +14,3 @@ type Sealer struct { func (sb *Sealer) Stop() { close(sb.stopping) } - -func (sb *Sealer) SectorSize() abi.SectorSize { - return sb.ssize -} - -func (sb *Sealer) SealProofType() abi.RegisteredSealProof { - return sb.sealProofType -} diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index b48b0bfd5..0887bc329 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -27,16 +27,8 @@ import ( var _ Storage = &Sealer{} -func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { - sectorSize, err := sizeFromConfig(*cfg) - if err != nil { - return nil, err - } - +func New(sectors SectorProvider) (*Sealer, error) { sb := &Sealer{ - sealProofType: cfg.SealProofType, - ssize: sectorSize, - sectors: sectors, stopping: make(chan struct{}), @@ -45,25 +37,29 @@ func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { return sb, nil } -func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error { // TODO: Allocate the sector here instead of in addpiece return nil } -func (sb *Sealer) AddPiece(ctx context.Context, 
sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { +func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { var offset abi.UnpaddedPieceSize for _, size := range existingPieceSizes { offset += size } - maxPieceSize := abi.PaddedPieceSize(sb.ssize) + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return abi.PieceInfo{}, err + } + + maxPieceSize := abi.PaddedPieceSize(ssize) if offset.Padded()+pieceSize.Padded() > maxPieceSize { return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) } - var err error var done func() var stagedFile *partialFile @@ -135,7 +131,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie break } - c, err := sb.pieceCid(buf[:read]) + c, err := sb.pieceCid(sector.ProofType, buf[:read]) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err) } @@ -162,7 +158,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return pieceCids[0], nil } - pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids) + pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) } @@ -178,13 +174,13 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie }, nil } -func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) { +func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) { prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in))) if err != nil { return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err) } - pieceCID, err := 
ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in))) + pieceCID, err := ffi.GeneratePieceCIDFromFile(spt, prf, abi.UnpaddedPieceSize(len(in))) if err != nil { return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err) } @@ -194,8 +190,12 @@ func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) { return pieceCID, werr() } -func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { - maxPieceSize := abi.PaddedPieceSize(sb.ssize) +func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + maxPieceSize := abi.PaddedPieceSize(ssize) // try finding existing unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) @@ -317,12 +317,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s // // TODO: This may be possible to do in parallel - err = ffi.UnsealRange(sb.sealProofType, + err = ffi.UnsealRange(sector.ProofType, srcPaths.Cache, sealed, opw, - sector.Number, - sector.Miner, + sector.ID.Number, + sector.ID.Miner, randomness, commd, uint64(at.Unpadded()), @@ -356,14 +356,18 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } -func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { path, done, err := sb.sectors.AcquireSector(ctx, sector, 
storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) if err != nil { return false, xerrors.Errorf("acquire unsealed sector path: %w", err) } defer done() - maxPieceSize := abi.PaddedPieceSize(sb.ssize) + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return false, err + } + maxPieceSize := abi.PaddedPieceSize(ssize) pf, err := openPartialFile(maxPieceSize, path.Unsealed) if err != nil { @@ -408,7 +412,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se return true, nil } -func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { +func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) @@ -443,29 +447,33 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke for _, piece := range pieces { sum += piece.Size.Unpadded() } - ussize := abi.PaddedPieceSize(sb.ssize).Unpadded() + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return nil, err + } + ussize := abi.PaddedPieceSize(ssize).Unpadded() if sum != ussize { return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) } // TODO: context cancellation respect p1o, err := ffi.SealPreCommitPhase1( - sb.sealProofType, + sector.ProofType, paths.Cache, paths.Unsealed, paths.Sealed, - sector.Number, - sector.Miner, + sector.ID.Number, + sector.ID.Miner, ticket, pieces, ) if err != nil { - return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + return nil, xerrors.Errorf("presealing 
sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } return p1o, nil } -func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { +func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) @@ -474,7 +482,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed) if err != nil { - return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } return storage.SectorCids{ @@ -483,40 +491,45 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase }, nil } -func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) } defer done() output, err := ffi.SealCommitPhase1( - sb.sealProofType, + sector.ProofType, cids.Sealed, cids.Unsealed, paths.Cache, paths.Sealed, - sector.Number, - sector.Miner, + sector.ID.Number, + 
sector.ID.Miner, ticket, seed, pieces, ) if err != nil { log.Warn("StandaloneSealCommit error: ", err) - log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed) + log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed) return nil, xerrors.Errorf("StandaloneSealCommit: %w", err) } return output, nil } -func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) { - return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner) +func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storage.Proof, error) { + return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner) } -func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + maxPieceSize := abi.PaddedPieceSize(ssize) + if len(keepUnsealed) > 0 { - maxPieceSize := abi.PaddedPieceSize(sb.ssize) sr := pieceRun(0, maxPieceSize) @@ -580,10 +593,10 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } defer done() - return ffi.ClearCache(uint64(sb.ssize), paths.Cache) + return ffi.ClearCache(uint64(ssize), paths.Cache) } -func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { // This call is meant to mark storage as 'freeable'. 
Given that unsealing is // very expensive, we don't remove data as soon as we can - instead we only // do that when we don't have free space for data that really needs it @@ -593,7 +606,7 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safe return xerrors.Errorf("not supported at this layer") } -func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error { return xerrors.Errorf("not supported at this layer") // happens in localworker } diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 5ae5cec67..686ef847b 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -43,7 +43,7 @@ var sectorSize, _ = sealProofType.SectorSize() var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} type seal struct { - id abi.SectorID + ref storage.SectorRef cids storage.SectorCids pi abi.PieceInfo ticket abi.SealRandomness @@ -56,12 +56,12 @@ func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader { ) } -func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) { +func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done func()) { defer done() dlen := abi.PaddedPieceSize(sectorSize).Unpadded() var err error - r := data(id.Number, dlen) + r := data(id.ID.Number, dlen) s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r) if err != nil { t.Fatalf("%+v", err) @@ -84,19 +84,19 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { defer done() seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} - pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) + 
pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) if err != nil { t.Fatalf("%+v", err) } - proof, err := sb.SealCommit2(context.TODO(), s.id, pc1) + proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1) if err != nil { t.Fatalf("%+v", err) } ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{ - SectorID: s.id, + SectorID: s.ref.ID, SealedCID: s.cids.Sealed, - SealProof: sealProofType, + SealProof: s.ref.ProofType, Proof: proof, Randomness: s.ticket, InteractiveRandomness: seed, @@ -111,7 +111,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { } } -func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) { +func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) { defer done() var b bytes.Buffer @@ -120,7 +120,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal(err) } - expect, _ := ioutil.ReadAll(data(si.Number, 1016)) + expect, _ := ioutil.ReadAll(data(si.ID.Number, 1016)) if !bytes.Equal(b.Bytes(), expect) { t.Fatal("read wrong bytes") } @@ -150,7 +150,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal(err) } - expect, _ = ioutil.ReadAll(data(si.Number, 1016)) + expect, _ = ioutil.ReadAll(data(si.ID.Number, 1016)) require.Equal(t, expect, b.Bytes()) b.Reset() @@ -174,13 +174,13 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { sis := make([]proof2.SectorInfo, len(seals)) for i, s := range seals { sis[i] = proof2.SectorInfo{ - SealProof: sealProofType, - SectorNumber: s.id.Number, + SealProof: s.ref.ProofType, + SectorNumber: s.ref.ID.Number, SealedCID: s.cids.Sealed, } } - proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].id.Miner, sis, randomness) + proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness) if 
len(skipped) > 0 { require.Error(t, err) require.EqualValues(t, skipped, skp) @@ -195,7 +195,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { Randomness: randomness, Proofs: proofs, ChallengedSectors: sis, - Prover: seals[0].id.Miner, + Prover: seals[0].ref.ID.Miner, }) if err != nil { t.Fatalf("%+v", err) @@ -205,7 +205,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { } } -func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) { +func corrupt(t *testing.T, sealer *Sealer, id storage.SectorRef) { paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage) require.NoError(t, err) defer done() @@ -264,14 +264,10 @@ func TestSealAndVerify(t *testing.T) { } miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } - sp := &basicfs.Provider{ Root: cdir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -286,9 +282,12 @@ func TestSealAndVerify(t *testing.T) { } defer cleanup() - si := abi.SectorID{Miner: miner, Number: 1} + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } - s := seal{id: si} + s := seal{ref: si} start := time.Now() @@ -338,13 +337,10 @@ func TestSealPoStNoCommit(t *testing.T) { miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } sp := &basicfs.Provider{ Root: dir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -360,9 +356,12 @@ func TestSealPoStNoCommit(t *testing.T) { } defer cleanup() - si := abi.SectorID{Miner: miner, Number: 1} + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } - s := seal{id: si} + s := seal{ref: si} start := time.Now() @@ -403,13 +402,10 @@ func TestSealAndVerify3(t *testing.T) { miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } 
sp := &basicfs.Provider{ Root: dir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -424,13 +420,22 @@ func TestSealAndVerify3(t *testing.T) { var wg sync.WaitGroup - si1 := abi.SectorID{Miner: miner, Number: 1} - si2 := abi.SectorID{Miner: miner, Number: 2} - si3 := abi.SectorID{Miner: miner, Number: 3} + si1 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + si2 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 2}, + ProofType: sealProofType, + } + si3 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 3}, + ProofType: sealProofType, + } - s1 := seal{id: si1} - s2 := seal{id: si2} - s3 := seal{id: si3} + s1 := seal{ref: si1} + s2 := seal{ref: si2} + s3 := seal{ref: si3} wg.Add(3) go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck @@ -451,7 +456,7 @@ func TestSealAndVerify3(t *testing.T) { corrupt(t, sb, si1) corrupt(t, sb, si2) - post(t, sb, []abi.SectorID{si1, si2}, s1, s2, s3) + post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3) } func BenchmarkWriteWithAlignment(b *testing.B) { diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index b67f9c595..b7e96636a 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -29,8 +29,8 @@ type Storage interface { storage.Prover StorageSealer - UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error - ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) + UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error + ReadPiece(ctx context.Context, writer io.Writer, sector 
storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } type Verifier interface { @@ -44,7 +44,7 @@ type Verifier interface { type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists - AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) + AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) } var _ SectorProvider = &basicfs.Provider{} diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index 9dab7103e..15e0e6ab3 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -11,6 +11,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -74,12 +75,15 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn continue } - sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: mid, Number: s.SectorNumber}, + ProofType: s.SealProof, + } paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) if err != nil { - log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) - skipped = append(skipped, sid) + log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err) + skipped 
= append(skipped, sid.ID) continue } doneFuncs = append(doneFuncs, d) diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 9a41dcd44..52e079d75 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -47,9 +47,7 @@ type Worker interface { } type SectorManager interface { - SectorSize() abi.SectorSize - - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ffiwrapper.StorageSealer storage.Prover @@ -61,8 +59,6 @@ type WorkerID uuid.UUID // worker session UUID var ClosedWorkerID = uuid.UUID{} type Manager struct { - scfg *ffiwrapper.Config - ls stores.LocalStorage storage *stores.Remote localStore *stores.Local @@ -105,13 +101,13 @@ type StorageAuth http.Header type WorkerStateStore *statestore.StateStore type ManagerStateStore *statestore.StateStore -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err } - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si, spt: cfg.SealProofType}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } @@ -119,15 +115,13 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit) m := &Manager{ - 
scfg: cfg, - ls: ls, storage: stor, localStore: lstor, remoteHnd: &stores.FetchHandler{Local: lstor}, index: si, - sched: newScheduler(cfg.SealProofType), + sched: newScheduler(), Prover: prover, @@ -162,7 +156,6 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg } err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ - SealProof: cfg.SealProofType, TaskTypes: localTasks, }, stor, lstor, si, m, wss)) if err != nil { @@ -198,46 +191,43 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { m.remoteHnd.ServeHTTP(w, r) } -func (m *Manager) SectorSize() abi.SectorSize { - sz, _ := m.scfg.SealProofType.SectorSize() - return sz -} - func schedNop(context.Context, Worker) error { return nil } -func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error { +func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am)) return err } } -func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { +func (m *Manager) readPiece(sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { return func(ctx context.Context, w Worker) error { r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size)) if err != nil { return err } - *rok = r.(bool) + if r != nil { + *rok = r.(bool) + } return nil } } -func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) 
(foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) { +func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) { // acquire a lock purely for reading unsealed sectors ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { returnErr = xerrors.Errorf("acquiring read sector lock: %w", err) return } // passing 0 spt because we only need it when allowFetch is true - best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) + best, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) if err != nil { returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) return @@ -247,7 +237,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect if foundUnsealed { // append to existing // There is unsealed sector, see if we can read from it - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) + selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), m.readPiece(sink, sector, offset, size, &readOk)) @@ -260,7 +250,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect return } -func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { +func (m *Manager) ReadPiece(ctx 
context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size) if err != nil { return err @@ -271,7 +261,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil { return xerrors.Errorf("acquiring unseal sector lock: %w", err) } @@ -300,7 +290,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return err } - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) + selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), m.readPiece(sink, sector, offset, size, &readOk)) @@ -315,16 +305,16 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return nil } -func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { +func (m *Manager) NewSector(ctx context.Context, sector storage.SectorRef) error { log.Warnf("stub NewSector") return nil } -func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { +func (m *Manager) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := 
m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUnsealed); err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err) } @@ -333,7 +323,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie if len(existingPieces) == 0 { // new selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) } else { // use existing - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) + selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) } var out abi.PieceInfo @@ -342,14 +332,16 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie if err != nil { return err } - out = p.(abi.PieceInfo) + if p != nil { + out = p.(abi.PieceInfo) + } return nil }) return out, err } -func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { +func (m *Manager) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -366,7 +358,9 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke waitErr = werr return } - out = p.(storage.PreCommit1Out) + if p != nil { + out = p.(storage.PreCommit1Out) + } } if wait { // already in progress @@ -374,7 +368,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil { return nil, xerrors.Errorf("acquiring sector lock: %w", err) } 
@@ -383,7 +377,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke selector := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathSealing) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces)) + err := m.startWork(ctx, w, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces)) if err != nil { return err } @@ -398,7 +392,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return out, waitErr } -func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { +func (m *Manager) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -415,7 +409,9 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase waitErr = werr return } - out = p.(storage.SectorCids) + if p != nil { + out = p.(storage.SectorCids) + } } if wait { // already in progress @@ -423,14 +419,14 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) } - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, true) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, 
storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) + err := m.startWork(ctx, w, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) if err != nil { return err } @@ -445,7 +441,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return out, waitErr } -func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { +func (m *Manager) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -462,7 +458,9 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a waitErr = werr return } - out = p.(storage.Commit1Out) + if p != nil { + out = p.(storage.Commit1Out) + } } if wait { // already in progress @@ -470,17 +468,17 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil { return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err) } // NOTE: We set allowFetch to false in so that we always execute on a worker // with direct access to the data. 
We want to do that because this step is // generally very cheap / fast, and transferring data is not worth the effort - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) + err := m.startWork(ctx, w, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) if err != nil { return err } @@ -495,7 +493,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a return out, waitErr } -func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) { +func (m *Manager) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (out storage.Proof, err error) { wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out) if err != nil { return storage.Proof{}, xerrors.Errorf("getWork: %w", err) @@ -509,7 +507,9 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou waitErr = werr return } - out = p.(storage.Proof) + if p != nil { + out = p.(storage.Proof) + } } if wait { // already in progress @@ -520,7 +520,7 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou selector := newTaskSelector() err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealCommit2(ctx, sector, phase1Out)) + err := m.startWork(ctx, w, wk)(w.SealCommit2(ctx, sector, phase1Out)) if err != nil { return err } @@ -536,17 +536,17 @@ func (m *Manager) SealCommit2(ctx 
context.Context, sector abi.SectorID, phase1Ou return out, waitErr } -func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } unsealed := storiface.FTUnsealed { - unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) + unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) if err != nil { return xerrors.Errorf("finding unsealed sector: %w", err) } @@ -556,7 +556,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove), @@ -589,75 +589,75 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return nil } -func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { log.Warnw("ReleaseUnsealed todo") return nil } -func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { +func (m *Manager) Remove(ctx context.Context, 
sector storage.SectorRef) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } var err error - if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTCache, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } return err } -func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { +func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { return m.returnResult(callID, pi, err) } -func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { +func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { return m.returnResult(callID, p1o, err) } -func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { +func (m *Manager) 
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { return m.returnResult(callID, sealed, err) } -func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { +func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { return m.returnResult(callID, out, err) } -func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { +func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { return m.returnResult(callID, proof, err) } -func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { +func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(callID, nil, err) } -func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { +func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(callID, nil, err) } -func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { +func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(callID, nil, err) } -func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { +func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(callID, nil, err) } -func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { +func (m *Manager) ReturnReadPiece(ctx 
context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { return m.returnResult(callID, ok, err) } -func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { +func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(callID, nil, err) } @@ -688,7 +688,48 @@ func (m *Manager) SchedDiag(ctx context.Context, doSched bool) (interface{}, err } } - return m.sched.Info(ctx) + si, err := m.sched.Info(ctx) + if err != nil { + return nil, err + } + + type SchedInfo interface{} + i := struct { + SchedInfo + + ReturnedWork []string + Waiting []string + + CallToWork map[string]string + + EarlyRet []string + }{ + SchedInfo: si, + + CallToWork: map[string]string{}, + } + + m.workLk.Lock() + + for w := range m.results { + i.ReturnedWork = append(i.ReturnedWork, w.String()) + } + + for id := range m.callRes { + i.EarlyRet = append(i.EarlyRet, id.String()) + } + + for w := range m.waitRes { + i.Waiting = append(i.Waiting, w.String()) + } + + for c, w := range m.callToWork { + i.CallToWork[c.String()] = w.String() + } + + m.workLk.Unlock() + + return i, nil } func (m *Manager) Close(ctx context.Context) error { diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index 8a1c1e4f9..e2f801303 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -5,9 +5,9 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" - "errors" "fmt" "os" + "time" "golang.org/x/xerrors" @@ -41,6 +41,9 @@ type WorkState struct { WorkerCall storiface.CallID // Set when entering wsRunning WorkError string // Status = wsDone, set when failed to start work + + WorkerHostname string // hostname of last worker handling this job + StartTime int64 // unix seconds } func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) { @@ -85,8 +88,7 @@ func (m 
*Manager) setupWorkTracker() { log.Errorf("cleannig up work state for %s", wid) } case wsDone: - // realistically this shouldn't ever happen as we return results - // immediately after getting them + // can happen after restart, abandoning work, and another restart log.Warnf("dropping done work, no result, wid %s", wid) if err := m.work.Get(wid).End(); err != nil { @@ -167,8 +169,16 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params }, nil } -func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storiface.CallID, err error) error { +func (m *Manager) startWork(ctx context.Context, w Worker, wk WorkID) func(callID storiface.CallID, err error) error { return func(callID storiface.CallID, err error) error { + var hostname string + info, ierr := w.Info(ctx) + if ierr != nil { + hostname = "[err]" + } else { + hostname = info.Hostname + } + m.workLk.Lock() defer m.workLk.Unlock() @@ -194,6 +204,8 @@ func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storifac ws.Status = wsRunning } ws.WorkerCall = callID + ws.WorkerHostname = hostname + ws.StartTime = time.Now().Unix() return nil }) if err != nil { @@ -337,15 +349,12 @@ func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interf } } -func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr string) error { - var err error - if serr != "" { - err = errors.New(serr) - } - +func (m *Manager) returnResult(callID storiface.CallID, r interface{}, cerr *storiface.CallError) error { res := result{ - r: r, - err: err, + r: r, + } + if cerr != nil { + res.err = cerr } m.sched.workTracker.onDone(callID) @@ -379,6 +388,20 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri m.results[wid] = res + err := m.work.Get(wid).Mutate(func(ws *WorkState) error { + ws.Status = wsDone + return nil + }) + if err != nil { + // in the unlikely case: + // * manager has restarted, and we're still tracking 
this work, and + // * the work is abandoned (storage-fsm doesn't do a matching call on the sector), and + // * the call is returned from the worker, and + // * this errors + // the user will get jobs stuck in ret-wait state + log.Errorf("marking work as done: %+v", err) + } + _, found := m.waitRes[wid] if found { close(m.waitRes[wid]) @@ -387,3 +410,8 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri return nil } + +func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error { + // TODO: Allow temp error + return m.returnResult(call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted"))) +} diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index f69d62b17..e768d4516 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -90,28 +91,23 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man st := newTestStorage(t) si := stores.NewIndex() - cfg := &ffiwrapper.Config{ - SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, - } lstor, err := stores.NewLocal(ctx, st, si, nil) require.NoError(t, err) - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, spt: cfg.SealProofType}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) require.NoError(t, err) stor := stores.NewRemote(lstor, si, nil, 6000) m := &Manager{ - scfg: cfg, - ls: st, storage: stor, localStore: lstor, remoteHnd: &stores.FetchHandler{Local: lstor}, index: si, - sched: newScheduler(cfg.SealProofType), + sched: newScheduler(), Prover: prover, @@ -141,12 +137,14 @@ func TestSimple(t *testing.T) 
{ } err := m.AddWorker(ctx, newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor, m)) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) require.NoError(t, err) @@ -176,14 +174,16 @@ func TestRedoPC1(t *testing.T) { } tw := newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor, m) err := m.AddWorker(ctx, tw) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) require.NoError(t, err) @@ -228,14 +228,16 @@ func TestRestartManager(t *testing.T) { } tw := newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor, m) err := m.AddWorker(ctx, tw) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) require.NoError(t, err) @@ -329,14 +331,16 @@ func TestRestartWorker(t *testing.T) { w := newLocalWorker(func() (ffiwrapper.Storage, error) { return &testExec{apch: arch}, nil }, WorkerConfig{ - SealProof: 0, TaskTypes: localTasks, }, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: 
abi.RegisteredSealProof_StackedDrg2KiBV1, + } apDone := make(chan struct{}) @@ -363,7 +367,6 @@ func TestRestartWorker(t *testing.T) { w = newLocalWorker(func() (ffiwrapper.Storage, error) { return &testExec{apch: arch}, nil }, WorkerConfig{ - SealProof: 0, TaskTypes: localTasks, }, stor, lstor, idx, m, statestore.New(wds)) @@ -400,7 +403,6 @@ func TestReenableWorker(t *testing.T) { w := newLocalWorker(func() (ffiwrapper.Storage, error) { return &testExec{apch: arch}, nil }, WorkerConfig{ - SealProof: 0, TaskTypes: localTasks, }, stor, lstor, idx, m, statestore.New(wds)) diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index b3de99ce5..747fcdf8b 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -27,21 +27,14 @@ var log = logging.Logger("sbmock") type SectorMgr struct { sectors map[abi.SectorID]*sectorState pieces map[cid.Cid][]byte - sectorSize abi.SectorSize nextSectorID abi.SectorNumber - proofType abi.RegisteredSealProof lk sync.Mutex } type mockVerif struct{} -func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr { - rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - panic(err) - } - +func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) for _, sid := range genesisSectors { sectors[sid] = §orState{ @@ -53,9 +46,7 @@ func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *Sect return &SectorMgr{ sectors: sectors, pieces: map[cid.Cid][]byte{}, - sectorSize: ssize, nextSectorID: 5, - proofType: rt, } } @@ -75,17 +66,17 @@ type sectorState struct { lk sync.Mutex } -func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { +func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) error { return nil } -func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, existingPieces 
[]abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { - log.Warn("Add piece: ", sectorID, size, mgr.proofType) +func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) var b bytes.Buffer tr := io.TeeReader(r, &b) - c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size) + c, err := ffiwrapper.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) } @@ -95,12 +86,12 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist mgr.lk.Lock() mgr.pieces[c] = b.Bytes() - ss, ok := mgr.sectors[sectorID] + ss, ok := mgr.sectors[sectorID.ID] if !ok { ss = §orState{ state: statePacking, } - mgr.sectors[sectorID] = ss + mgr.sectors[sectorID.ID] = ss } mgr.lk.Unlock() @@ -115,10 +106,6 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist }, nil } -func (mgr *SectorMgr) SectorSize() abi.SectorSize { - return mgr.sectorSize -} - func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -127,9 +114,9 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { return id, nil } -func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error { +func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return xerrors.Errorf("no sector with id %d in storage", sid) @@ -140,18 +127,23 @@ func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error { return nil } -func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, 
err error) { +func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no sector with id %d in storage", sid) } + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return nil, xerrors.Errorf("failed to get proof sector size: %w", err) + } + ss.lk.Lock() defer ss.lk.Unlock() - ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() + ussize := abi.PaddedPieceSize(ssize).Unpadded() // TODO: verify pieces in sinfo.pieces match passed in pieces @@ -180,7 +172,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick } } - commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis) + commd, err := MockVerifier.GenerateDataCommitment(sid.ProofType, pis) if err != nil { return nil, err } @@ -195,7 +187,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick return cc, nil } -func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { +func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { db := []byte(string(phase1Out)) db[0] ^= 'd' @@ -214,9 +206,9 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas }, nil } -func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { +func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output 
storage.Commit1Out, err error) { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no such sector %d", sid) @@ -236,16 +228,16 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket var out [32]byte for i := range out { - out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff) + out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.ID.Number&0xff) } return out[:], nil } -func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { +func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { var out [1920]byte for i := range out[:len(phase1Out)] { - out[i] = phase1Out[i] ^ byte(sid.Number&0xff) + out[i] = phase1Out[i] ^ byte(sid.ID.Number&0xff) } return out[:], nil @@ -253,10 +245,10 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O // Test Instrumentation Methods -func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { +func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] if !ok { return fmt.Errorf("no such sector in storage") } @@ -265,10 +257,10 @@ func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { return nil } -func (mgr *SectorMgr) MarkCorrupted(sid abi.SectorID, corrupted bool) error { +func (mgr *SectorMgr) MarkCorrupted(sid storage.SectorRef, corrupted bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] if !ok { return fmt.Errorf("no such sector in storage") } @@ -353,113 +345,120 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea 
} } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { + if len(mgr.sectors[sectorID.ID].pieces) > 1 || offset != 0 { panic("implme") } - _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size)) + _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID.ID].pieces[0]]), int64(size)) return err } -func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { - usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() +func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { + psize, err := spt.SectorSize() + if err != nil { + return storage.SectorRef{}, nil, err + } + usize := abi.PaddedPieceSize(psize).Unpadded() sid, err := mgr.AcquireSectorNumber() if err != nil { - return abi.SectorID{}, nil, err + return storage.SectorRef{}, nil, err } buf := make([]byte, usize) _, _ = rand.Read(buf) // nolint:gosec - id := abi.SectorID{ - Miner: mid, - Number: sid, + id := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: sid, + }, + ProofType: spt, } pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf)) if err != nil { - return abi.SectorID{}, nil, err + return storage.SectorRef{}, nil, err } return id, []abi.PieceInfo{pi}, nil } -func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error { +func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []storage.Range) error { return nil } -func (mgr *SectorMgr) ReleaseUnsealed(ctx 
context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { return nil } -func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { +func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error { mgr.lk.Lock() defer mgr.lk.Unlock() - if _, has := mgr.sectors[sector]; !has { + if _, has := mgr.sectors[sector.ID]; !has { return xerrors.Errorf("sector not found") } - delete(mgr.sectors, sector) + delete(mgr.sectors, sector.ID) return nil } -func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []abi.SectorID) ([]abi.SectorID, error) { +func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef) ([]abi.SectorID, error) { var bad []abi.SectorID for _, sid := range ids { - _, found := mgr.sectors[sid] + _, found := mgr.sectors[sid.ID] - if !found || mgr.sectors[sid].failed { - bad = append(bad, sid) + if !found || mgr.sectors[sid.ID].failed { + bad = append(bad, sid.ID) } } return bad, nil } -func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { +func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { +func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { +func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed 
storage.SectorCids, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { +func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { +func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { +func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) 
ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } diff --git a/extern/sector-storage/mock/mock_test.go b/extern/sector-storage/mock/mock_test.go index 47c060f66..abc120058 100644 --- a/extern/sector-storage/mock/mock_test.go +++ b/extern/sector-storage/mock/mock_test.go @@ -9,9 +9,9 @@ import ( ) func TestOpFinish(t *testing.T) { - sb := NewMockSectorMgr(2048, nil) + sb := NewMockSectorMgr(nil) - sid, pieces, err := sb.StageFakeData(123) + sid, pieces, err := sb.StageFakeData(123, abi.RegisteredSealProof_StackedDrg2KiBV1_1) if err != nil { t.Fatal(err) } diff --git a/extern/sector-storage/request_queue.go b/extern/sector-storage/request_queue.go index 9247ce24a..925c44fa8 100644 --- a/extern/sector-storage/request_queue.go +++ b/extern/sector-storage/request_queue.go @@ -20,7 +20,7 @@ func (q requestQueue) Less(i, j int) bool { return q[i].taskType.Less(q[j].taskType) } - return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield + return q[i].sector.ID.Number < q[j].sector.ID.Number // optimize minerActor.NewSectors bitfield } func (q requestQueue) Swap(i, j int) { diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/resources.go index 6b531e82b..7da3e96a6 100644 --- a/extern/sector-storage/resources.go +++ b/extern/sector-storage/resources.go @@ -314,4 +314,13 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources func init() { ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch] + + // V1_1 is the same as V1 + for _, m := range ResourceTable { + m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] + m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = 
m[abi.RegisteredSealProof_StackedDrg8MiBV1] + m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1] + m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1] + m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] + } } diff --git a/extern/sector-storage/roprov.go b/extern/sector-storage/roprov.go index 7f051b549..ebc7610d7 100644 --- a/extern/sector-storage/roprov.go +++ b/extern/sector-storage/roprov.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -14,23 +14,17 @@ import ( type readonlyProvider struct { index stores.SectorIndex stor *stores.Local - spt abi.RegisteredSealProof } -func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { +func (l *readonlyProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { if allocate != storiface.FTNone { return storiface.SectorPaths{}, nil, xerrors.New("read-only storage") } - ssize, err := l.spt.SectorSize() - if err != nil { - return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err) - } - ctx, cancel := context.WithCancel(ctx) // use TryLock to avoid blocking - locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone) + locked, err := l.index.StorageTryLock(ctx, id.ID, existing, storiface.FTNone) if err != nil { cancel() return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) @@ 
-40,7 +34,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") } - p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove) + p, _, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing, storiface.AcquireMove) return p, cancel, err } diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 549a16a96..79761a65e 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -11,6 +11,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -51,8 +52,6 @@ type WorkerSelector interface { } type scheduler struct { - spt abi.RegisteredSealProof - workersLk sync.RWMutex workers map[WorkerID]*workerHandle @@ -122,7 +121,7 @@ type activeResources struct { } type workerRequest struct { - sector abi.SectorID + sector storage.SectorRef taskType sealtasks.TaskType priority int // larger values more important sel WorkerSelector @@ -143,10 +142,8 @@ type workerResponse struct { err error } -func newScheduler(spt abi.RegisteredSealProof) *scheduler { +func newScheduler() *scheduler { return &scheduler{ - spt: spt, - workers: map[WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), @@ -168,7 +165,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { } } -func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { +func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { ret := make(chan workerResponse) 
select { @@ -315,7 +312,7 @@ func (sh *scheduler) diag() SchedDiagInfo { task := (*sh.schedQueue)[sqi] out.Requests = append(out.Requests, SchedDiagRequestInfo{ - Sector: task.sector, + Sector: task.sector.ID, TaskType: task.taskType, Priority: task.priority, }) @@ -378,7 +375,7 @@ func (sh *scheduler) trySched() { }() task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][sh.spt] + needRes := ResourceTable[task.taskType][task.sector.ProofType] task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { @@ -400,7 +397,7 @@ func (sh *scheduler) trySched() { } rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) - ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker) + ok, err := task.sel.Ok(rpcCtx, task.taskType, task.sector.ProofType, worker) cancel() if err != nil { log.Errorf("trySched(1) req.sel.Ok error: %+v", err) @@ -456,21 +453,21 @@ func (sh *scheduler) trySched() { for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][sh.spt] + needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources - log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) { continue } - log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.Number, task.taskType, wnd) + log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd) windows[wnd].allocated.add(wr, needRes) // TODO: We probably want to re-sort acceptableWindows here based on new diff --git a/extern/sector-storage/sched_test.go 
b/extern/sector-storage/sched_test.go index 849896ff6..a87d403b7 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -47,55 +47,55 @@ type schedTestWorker struct { session uuid.UUID } -func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { +func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { +func (s *schedTestWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { +func (s *schedTestWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { +func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) 
ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { +func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { +func (s *schedTestWorker) Remove(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { +func (s *schedTestWorker) NewSector(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { +func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (s *schedTestWorker) Fetch(ctx context.Context, id storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index 
storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { +func (s *schedTestWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { panic("implement me") } @@ -165,8 +165,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str } func TestSchedStartStop(t *testing.T) { - spt := abi.RegisteredSealProof_StackedDrg32GiBV1 - sched := newScheduler(spt) + sched := newScheduler() go sched.runSched() addTestWorker(t, sched, stores.NewIndex(), "fred", nil) @@ -211,12 +210,15 @@ func TestSched(t *testing.T) { go func() { defer rm.wg.Done() - sectorNum := abi.SectorID{ - Miner: 8, - Number: sid, + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 8, + Number: sid, + }, + ProofType: spt, } - err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error { + err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -286,7 +288,7 @@ func TestSched(t *testing.T) { return func(t *testing.T) { index := stores.NewIndex() - sched := newScheduler(spt) + sched := newScheduler() sched.testSync = make(chan struct{}) go sched.runSched() @@ -518,7 +520,6 @@ func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b var _ WorkerSelector = slowishSelector(true) func BenchmarkTrySched(b *testing.B) { - spt := 
abi.RegisteredSealProof_StackedDrg32GiBV1 logging.SetAllLoggers(logging.LevelInfo) defer logging.SetAllLoggers(logging.LevelDebug) ctx := context.Background() @@ -528,7 +529,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - sched := newScheduler(spt) + sched := newScheduler() sched.workers[WorkerID{}] = &workerHandle{ workerRpc: nil, info: storiface.WorkerInfo{ @@ -568,9 +569,8 @@ func BenchmarkTrySched(b *testing.B) { } func TestWindowCompact(t *testing.T) { - sh := scheduler{ - spt: abi.RegisteredSealProof_StackedDrg32GiBV1, - } + sh := scheduler{} + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) { return func(t *testing.T) { @@ -584,8 +584,11 @@ func TestWindowCompact(t *testing.T) { window := &schedWindow{} for _, task := range windowTasks { - window.todo = append(window.todo, &workerRequest{taskType: task}) - window.allocated.add(wh.info.Resources, ResourceTable[task][sh.spt]) + window.todo = append(window.todo, &workerRequest{ + taskType: task, + sector: storage.SectorRef{ProofType: spt}, + }) + window.allocated.add(wh.info.Resources, ResourceTable[task][spt]) } wh.activeWindows = append(wh.activeWindows, window) @@ -604,7 +607,7 @@ func TestWindowCompact(t *testing.T) { for ti, task := range tasks { require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti) - expectRes.add(wh.info.Resources, ResourceTable[task][sh.spt]) + expectRes.add(wh.info.Resources, ResourceTable[task][spt]) } require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi) diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index e56e9056d..67bddca3a 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -139,10 +139,17 @@ func (sw *schedWorker) handleWorker() { // wait for more tasks to be assigned by the main scheduler or for the 
worker // to finish precessing a task - update, ok := sw.waitForUpdates() + update, pokeSched, ok := sw.waitForUpdates() if !ok { return } + if pokeSched { + // a task has finished preparing, which can mean that we've freed some space on some worker + select { + case sched.workerChange <- struct{}{}: + default: // workerChange is buffered, and scheduling is global, so it's ok if we don't send here + } + } if update { break } @@ -257,23 +264,23 @@ func (sw *schedWorker) requestWindows() bool { return true } -func (sw *schedWorker) waitForUpdates() (update bool, ok bool) { +func (sw *schedWorker) waitForUpdates() (update bool, sched bool, ok bool) { select { case <-sw.heartbeatTimer.C: - return false, true + return false, false, true case w := <-sw.scheduledWindows: sw.worker.wndLk.Lock() sw.worker.activeWindows = append(sw.worker.activeWindows, w) sw.worker.wndLk.Unlock() - return true, true + return true, false, true case <-sw.taskDone: log.Debugw("task done", "workerid", sw.wid) - return true, true + return true, true, true case <-sw.sched.closing: case <-sw.worker.closingMgr: } - return false, false + return false, false, false } func (sw *schedWorker) workerCompactWindows() { @@ -287,7 +294,7 @@ func (sw *schedWorker) workerCompactWindows() { var moved []int for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][sw.sched.spt] + needRes := ResourceTable[todo.taskType][todo.sector.ProofType] if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) { continue } @@ -343,7 +350,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][sw.sched.spt] + needRes := ResourceTable[todo.taskType][todo.sector.ProofType] if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) { tidx = t break @@ -357,7 +364,7 @@ assignLoop: todo := firstWindow.todo[tidx] - log.Debugf("assign worker sector %d", todo.sector.Number) + 
log.Debugf("assign worker sector %d", todo.sector.ID.Number) err := sw.startProcessingTask(sw.taskDone, todo) if err != nil { @@ -382,7 +389,7 @@ assignLoop: func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][sh.spt] + needRes := ResourceTable[req.taskType][req.sector.ProofType] w.lk.Lock() w.preparing.add(w.info.Resources, needRes) diff --git a/extern/sector-storage/selector_task.go b/extern/sector-storage/selector_task.go index ffed40d68..e4d92757e 100644 --- a/extern/sector-storage/selector_task.go +++ b/extern/sector-storage/selector_task.go @@ -45,4 +45,4 @@ func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *work return a.utilization() < b.utilization(), nil } -var _ WorkerSelector = &allocSelector{} +var _ WorkerSelector = &taskSelector{} diff --git a/extern/sector-storage/stats.go b/extern/sector-storage/stats.go index bae60b426..df3b4eed0 100644 --- a/extern/sector-storage/stats.go +++ b/extern/sector-storage/stats.go @@ -46,7 +46,7 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { for _, request := range window.todo { out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{ ID: storiface.UndefCall, - Sector: request.sector, + Sector: request.sector.ID, Task: request.taskType, RunWait: wi + 1, Start: request.start, @@ -67,12 +67,26 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { continue } + var ws WorkState + if err := m.work.Get(work).Get(&ws); err != nil { + log.Errorf("WorkerJobs: get work %s: %+v", work, err) + } + + wait := storiface.RWRetWait + if _, ok := m.results[work]; ok { + wait = storiface.RWReturned + } + if ws.Status == wsDone { + wait = storiface.RWRetDone + } + out[uuid.UUID{}] = append(out[uuid.UUID{}], storiface.WorkerJob{ - ID: id, - Sector: id.Sector, - Task: work.Method, - RunWait: -1, - Start: time.Time{}, + ID: id, + Sector: id.Sector, + 
Task: work.Method, + RunWait: wait, + Start: time.Unix(ws.StartTime, 0), + Hostname: ws.WorkerHostname, }) } diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 2237bd407..a4c0480d4 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -12,6 +12,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" + + "github.com/filecoin-project/specs-storage/storage" ) var log = logging.Logger("stores") @@ -73,7 +75,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ // The caller has a lock on this sector already, no need to get one here // passing 0 spt because we don't allocate anything - paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + si := storage.SectorRef{ + ID: id, + ProofType: 0, + } + + paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { log.Errorf("%+v", err) w.WriteHeader(500) diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index acd799ab7..eb3e7690f 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -2,6 +2,7 @@ package stores import ( "context" + "errors" "net/url" gopath "path" "sort" @@ -35,7 +36,7 @@ type StorageInfo struct { type HealthReport struct { Stat fsutil.FsStat - Err error + Err string } type SectorStorageInfo struct { @@ -175,7 +176,9 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep } ent.fsi = report.Stat - ent.heartbeatErr = report.Err + if report.Err != "" { + ent.heartbeatErr = errors.New(report.Err) + } ent.lastHeartbeat = time.Now() return nil diff --git a/extern/sector-storage/stores/interface.go 
b/extern/sector-storage/stores/interface.go index 574ec599e..a997ad3d2 100644 --- a/extern/sector-storage/stores/interface.go +++ b/extern/sector-storage/stores/interface.go @@ -5,12 +5,14 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) + AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error // like remove, but doesn't remove the primary sector copy, nor the last @@ -18,7 +20,7 @@ type Store interface { RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error // move sectors into storage - MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error + MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) } diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index 89c22bd99..c39e76f18 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -14,6 +14,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" 
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -298,29 +299,39 @@ func (st *Local) reportHealth(ctx context.Context) { return } - st.localLk.RLock() + st.reportStorage(ctx) + } +} - toReport := map[ID]HealthReport{} - for id, p := range st.paths { - stat, err := p.stat(st.localStorage) +func (st *Local) reportStorage(ctx context.Context) { + st.localLk.RLock() - toReport[id] = HealthReport{ - Stat: stat, - Err: err, - } + toReport := map[ID]HealthReport{} + for id, p := range st.paths { + stat, err := p.stat(st.localStorage) + r := HealthReport{Stat: stat} + if err != nil { + r.Err = err.Error() } - st.localLk.RUnlock() + toReport[id] = r + } - for id, report := range toReport { - if err := st.index.StorageReportHealth(ctx, id, report); err != nil { - log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err) - } + st.localLk.RUnlock() + + for id, report := range toReport { + if err := st.index.StorageReportHealth(ctx, id, report); err != nil { + log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err) } } } -func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { +func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return nil, err + } + st.localLk.Lock() done := func() {} @@ -350,7 +361,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector overhead := int64(overheadTab[fileType]) * int64(ssize) / storiface.FSOverheadDen if stat.Available < overhead { - return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", 
overhead, p.local, id, stat.Available) + return nil, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)) } p.reserved += overhead @@ -370,11 +381,16 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector return done, nil } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return storiface.SectorPaths{}, storiface.SectorPaths{}, err + } + st.localLk.RLock() defer st.localLk.RUnlock() @@ -386,7 +402,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. continue } - si, err := st.index.StorageFindSector(ctx, sid, fileType, ssize, false) + si, err := st.index.StorageFindSector(ctx, sid.ID, fileType, ssize, false) if err != nil { log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err) continue @@ -402,7 +418,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. continue } - spath := p.sectorPath(sid, fileType) + spath := p.sectorPath(sid.ID, fileType) storiface.SetPathByType(&out, fileType, spath) storiface.SetPathByType(&storageIDs, fileType, string(info.ID)) @@ -444,7 +460,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. 
// TODO: Check free space - best = p.sectorPath(sid, fileType) + best = p.sectorPath(sid.ID, fileType) bestID = si.ID break } @@ -568,16 +584,18 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storifa log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err) } + st.reportStorage(ctx) // report freed space + return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { - dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) +func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { + dest, destIds, err := st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } @@ -609,7 +627,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore) - if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil { + if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil { return xerrors.Errorf("dropping source sector from index: %w", err) } @@ -618,11 +636,13 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) } - if err := 
st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil { + if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil { return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err) } } + st.reportStorage(ctx) // report space use changes + return nil } diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index 37dde910d..bf66c1bb5 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/hashicorp/go-multierror" files "github.com/ipfs/go-ipfs-files" @@ -58,7 +59,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -66,9 +67,9 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se for { r.fetchLk.Lock() - c, locked := r.fetching[s] + c, locked := r.fetching[s.ID] if !locked { - r.fetching[s] = make(chan struct{}) + r.fetching[s.ID] = make(chan struct{}) 
r.fetchLk.Unlock() break } @@ -85,12 +86,12 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se defer func() { r.fetchLk.Lock() - close(r.fetching[s]) - delete(r.fetching, s) + close(r.fetching[s.ID]) + delete(r.fetching, s.ID) r.fetchLk.Unlock() }() - paths, stores, err := r.local.AcquireSector(ctx, s, ssize, existing, allocate, pathType, op) + paths, stores, err := r.local.AcquireSector(ctx, s, existing, allocate, pathType, op) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } @@ -106,7 +107,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se } } - apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op) + apaths, ids, err := r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) } @@ -116,7 +117,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se odt = storiface.FsOverheadFinalized } - releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt) + releaseStorage, err := r.local.Reserve(ctx, s, toFetch, ids, odt) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) } @@ -134,7 +135,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se dest := storiface.PathByType(apaths, fileType) storageID := storiface.PathByType(ids, fileType) - url, err := r.acquireFromRemote(ctx, s, fileType, dest) + url, err := r.acquireFromRemote(ctx, s.ID, fileType, dest) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, err } @@ -142,7 +143,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se storiface.SetPathByType(&paths, fileType, dest) 
storiface.SetPathByType(&stores, fileType, storageID) - if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil { + if err := r.index.StorageDeclareSector(ctx, ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) continue } @@ -281,14 +282,14 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { +func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { // Make sure we have the data local - _, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + _, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } - return r.local.MoveStorage(ctx, s, ssize, types) + return r.local.MoveStorage(ctx, s, types) } func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index bbc9ca554..49d1de357 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -2,6 +2,7 @@ package storiface import ( "context" + "errors" "fmt" "io" "time" @@ -41,13 +42,26 @@ type WorkerStats struct { CpuUse uint64 // nolint } +const ( + RWRetWait = -1 + RWReturned = -2 + RWRetDone = -3 +) + type WorkerJob struct { ID CallID Sector abi.SectorID Task sealtasks.TaskType - RunWait int // -1 - ret-wait, 0 - running, 1+ - assigned + // 1+ - assigned + // 0 - running + // -1 - ret-wait + // -2 - returned + // -3 - ret-done + RunWait int Start time.Time + + Hostname string 
`json:",omitempty"` // optional, set for ret-wait jobs } type CallID struct { @@ -64,29 +78,69 @@ var _ fmt.Stringer = &CallID{} var UndefCall CallID type WorkerCalls interface { - AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error) - SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) - SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error) - SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) - SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error) - FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error) - ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error) - MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error) - UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) - ReadPiece(context.Context, io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error) - Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error) + AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error) + SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) + SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error) + SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed 
abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) + SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) + FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) + ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) + MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) + UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) + ReadPiece(context.Context, io.Writer, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error) + Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) +} + +type ErrorCode int + +const ( + ErrUnknown ErrorCode = iota +) + +const ( + // Temp Errors + ErrTempUnknown ErrorCode = iota + 100 + ErrTempWorkerRestart + ErrTempAllocateSpace +) + +type CallError struct { + Code ErrorCode + Message string + sub error +} + +func (c *CallError) Error() string { + return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message) +} + +func (c *CallError) Unwrap() error { + if c.sub != nil { + return c.sub + } + + return errors.New(c.Message) +} + +func Err(code ErrorCode, sub error) *CallError { + return &CallError{ + Code: code, + Message: sub.Error(), + + sub: sub, + } } type WorkerReturn interface { - ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error - ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err string) error - ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err string) error - ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err string) error - ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err 
string) error - ReturnFinalizeSector(ctx context.Context, callID CallID, err string) error - ReturnReleaseUnsealed(ctx context.Context, callID CallID, err string) error - ReturnMoveStorage(ctx context.Context, callID CallID, err string) error - ReturnUnsealPiece(ctx context.Context, callID CallID, err string) error - ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err string) error - ReturnFetch(ctx context.Context, callID CallID, err string) error + ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err *CallError) error + ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err *CallError) error + ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err *CallError) error + ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err *CallError) error + ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error + ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error + ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error + ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error + ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error + ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error + ReturnFetch(ctx context.Context, callID CallID, err *CallError) error } diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index 0c8a240a3..72b27b154 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -31,50 +31,50 @@ func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, panic("implement me") } -func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { +func (t *testExec) 
SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { panic("implement me") } -func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { +func (t *testExec) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { panic("implement me") } -func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (t *testExec) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { panic("implement me") } -func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { +func (t *testExec) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storage.Proof, error) { panic("implement me") } -func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (t *testExec) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { panic("implement me") } -func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { panic("implement me") } -func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error { +func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } -func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error { +func (t 
*testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } -func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { +func (t *testExec) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { resp := make(chan apres) t.apch <- resp ar := <-resp return ar.pi, ar.err } -func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { +func (t *testExec) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { panic("implement me") } -func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index d04afb0cc..2fe99f3d4 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -31,11 +31,6 @@ type testWorker struct { } func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker { - ssize, err := wcfg.SealProof.SectorSize() - if err != nil { - panic(err) - } - acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -46,15 +41,15 @@ func newTestWorker(wcfg WorkerConfig, lstor 
*stores.Local, ret storiface.WorkerR lstor: lstor, ret: ret, - mockSeal: mock.NewMockSectorMgr(ssize, nil), + mockSeal: mock.NewMockSectorMgr(nil), session: uuid.New(), } } -func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) { +func (t *testWorker) asyncCall(sector storage.SectorRef, work func(ci storiface.CallID)) (storiface.CallID, error) { ci := storiface.CallID{ - Sector: sector, + Sector: sector.ID, ID: uuid.New(), } @@ -63,16 +58,16 @@ func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallI return ci, nil } -func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) - if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil { + if err := t.ret.ReturnAddPiece(ctx, ci, p, toCallError(err)); err != nil { log.Error(err) } }) } -func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { t.pc1s++ @@ -84,15 +79,15 @@ func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ti defer t.pc1lk.Unlock() p1o, err := t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces) - if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, errstr(err)); err != nil { + if err := 
t.ret.ReturnSealPreCommit1(ctx, ci, p1o, toCallError(err)); err != nil { log.Error(err) } }) } -func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (t *testWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { - if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil { + if err := t.ret.ReturnFetch(ctx, ci, nil); err != nil { log.Error(err) } }) diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index ae2b325ca..c069d7bf7 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -20,7 +20,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" - storage2 "github.com/filecoin-project/specs-storage/storage" + storage "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -31,7 +31,6 @@ import ( var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} type WorkerConfig struct { - SealProof abi.RegisteredSealProof TaskTypes []sealtasks.TaskType NoSwap bool } @@ -40,7 +39,6 @@ type WorkerConfig struct { type ExecutorFunc func() (ffiwrapper.Storage, error) type LocalWorker struct { - scfg *ffiwrapper.Config storage stores.Store localStore *stores.Local sindex stores.SectorIndex @@ -64,9 +62,6 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store } w := &LocalWorker{ - scfg: &ffiwrapper.Config{ - SealProofType: wcfg.SealProof, - }, storage: store, localStore: local, sindex: sindex, 
@@ -95,7 +90,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store go func() { for _, call := range unfinished { - err := xerrors.Errorf("worker restarted") + err := storiface.Err(storiface.ErrTempWorkerRestart, xerrors.New("worker restarted")) // TODO: Handle restarting PC1 once support is merged @@ -119,18 +114,13 @@ type localWorkerPathProvider struct { op storiface.AcquireMode } -func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { - ssize, err := l.w.scfg.SealProofType.SectorSize() +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op) if err != nil { return storiface.SectorPaths{}, nil, err } - paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op) - if err != nil { - return storiface.SectorPaths{}, nil, err - } - - releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal) + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal) if err != nil { return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) } @@ -147,7 +137,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. 
sid := storiface.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil { + if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } } @@ -155,22 +145,36 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. } func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { - return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) + return ffiwrapper.New(&localWorkerPathProvider{w: l}) } type ReturnType string +const ( + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + ReadPiece ReturnType = "ReadPiece" + Fetch ReturnType = "Fetch" +) + // in: func(WorkerReturn, context.Context, CallID, err string) // in: func(WorkerReturn, context.Context, CallID, ret T, err string) -func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error { +func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error { rf := reflect.ValueOf(in) ft := rf.Type() withRet := ft.NumIn() == 5 - return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err error) error { + return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err *storiface.CallError) error { rctx := reflect.ValueOf(ctx) rwr := reflect.ValueOf(wr) - rerr := reflect.ValueOf(errstr(err)) + rerr := reflect.ValueOf(err) rci := reflect.ValueOf(ci) 
var ro []reflect.Value @@ -194,23 +198,23 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } } -var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error{ - "AddPiece": rfunc(storiface.WorkerReturn.ReturnAddPiece), - "SealPreCommit1": rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - "SealPreCommit2": rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - "SealCommit1": rfunc(storiface.WorkerReturn.ReturnSealCommit1), - "SealCommit2": rfunc(storiface.WorkerReturn.ReturnSealCommit2), - "FinalizeSector": rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - "ReleaseUnsealed": rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - "MoveStorage": rfunc(storiface.WorkerReturn.ReturnMoveStorage), - "UnsealPiece": rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - "ReadPiece": rfunc(storiface.WorkerReturn.ReturnReadPiece), - "Fetch": rfunc(storiface.WorkerReturn.ReturnFetch), +var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ + AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + ReadPiece: rfunc(storiface.WorkerReturn.ReturnReadPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } -func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) 
(storiface.CallID, error) { +func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { ci := storiface.CallID{ - Sector: sector, + Sector: sector.ID, ID: uuid.New(), } @@ -241,7 +245,7 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt Ret } } - if doReturn(ctx, rt, ci, l.ret, res, err) { + if doReturn(ctx, rt, ci, l.ret, res, toCallError(err)) { if err := l.ct.onReturned(ci); err != nil { log.Errorf("tracking call (done): %+v", err) } @@ -251,8 +255,17 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt Ret return ci, nil } +func toCallError(err error) *storiface.CallError { + var serr *storiface.CallError + if err != nil && !xerrors.As(err, &serr) { + serr = storiface.Err(storiface.ErrUnknown, err) + } + + return serr +} + // doReturn tries to send the result to manager, returns true if successful -func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr error) bool { +func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr *storiface.CallError) bool { for { err := returnFunc[rt](ctx, ci, ret, res, rerr) if err == nil { @@ -275,15 +288,7 @@ func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret stori return true } -func errstr(err error) string { - if err != nil { - return err.Error() - } - - return "" -} - -func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { +func (l *LocalWorker) NewSector(ctx context.Context, sector storage.SectorRef) error { sb, err := l.executor() if err != nil { return err @@ -292,19 +297,19 @@ func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error return sb.NewSector(ctx, sector) } -func (l *LocalWorker) AddPiece(ctx context.Context, sector 
abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) { +func (l *LocalWorker) AddPiece(ctx context.Context, sector storage.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "AddPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, AddPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.AddPiece(ctx, sector, epcs, sz, r) }) } -func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "Fetch", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { +func (l *LocalWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, Fetch, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype) if err == nil { done() @@ -314,16 +319,16 @@ func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType s }) } -func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "SealPreCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { +func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, SealPreCommit1, func(ctx 
context.Context, ci storiface.CallID) (interface{}, error) { { // cleanup previous failed attempts if they exist - if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); err != nil { return nil, xerrors.Errorf("cleaning up sealed data: %w", err) } - if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true); err != nil { return nil, xerrors.Errorf("cleaning up cache data: %w", err) } } @@ -337,52 +342,52 @@ func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, t }) } -func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) { +func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealPreCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, SealPreCommit2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.SealPreCommit2(ctx, sector, phase1Out) }) } -func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) { +func (l *LocalWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return 
l.asyncCall(ctx, sector, SealCommit1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids) }) } -func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) { +func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, SealCommit2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.SealCommit2(ctx, sector, phase1Out) }) } -func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) { +func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "FinalizeSector", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, FinalizeSector, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil { return nil, xerrors.Errorf("finalizing sector: %w", err) } if len(keepUnsealed) == 0 { - if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); err != nil { return nil, xerrors.Errorf("removing unsealed data: %w", err) } } @@ -391,7 +396,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k }) } -func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, 
safeToFree []storage2.Range) (storiface.CallID, error) { +func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { return storiface.UndefCall, xerrors.Errorf("implement me") } @@ -411,33 +416,28 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { return err } -func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "MoveStorage", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { - ssize, err := l.scfg.SealProofType.SectorSize() - if err != nil { - return nil, err - } - - return nil, l.storage.MoveStorage(ctx, sector, ssize, types) +func (l *LocalWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, MoveStorage, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return nil, l.storage.MoveStorage(ctx, sector, types) }) } -func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { +func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "UnsealPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, UnsealPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { return nil, xerrors.Errorf("unsealing sector: %w", err) } - if err = 
l.storage.RemoveCopies(ctx, sector, storiface.FTSealed); err != nil { + if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil { return nil, xerrors.Errorf("removing source data: %w", err) } - if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil { + if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil { return nil, xerrors.Errorf("removing source data: %w", err) } @@ -445,13 +445,13 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde }) } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "ReadPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, ReadPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.ReadPiece(ctx, writer, sector, index, size) }) } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 4a22fcca7..febb190c5 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -42,7 +42,7 @@ func (wt *workTracker) onDone(callID storiface.CallID) { delete(wt.running, callID) } -func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { +func (wt *workTracker) track(wid WorkerID, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { return func(callID storiface.CallID, err error) (storiface.CallID, error) { if err != nil { 
return callID, err @@ -60,7 +60,7 @@ func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.Task wt.running[callID] = trackedWork{ job: storiface.WorkerJob{ ID: callID, - Sector: sid, + Sector: sid.ID, Task: task, Start: time.Now(), }, @@ -99,39 +99,39 @@ type trackedWorker struct { tracker *workTracker } -func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) } -func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { +func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) } -func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { +func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) } -func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { +func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { 
return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) } -func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { +func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) } -func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) } -func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (t *trackedWorker) Fetch(ctx context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) } -func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { +func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, 
index, size, randomness, cid)) } -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { +func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) } diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index ed7a691ef..56a55bb61 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -14,7 +14,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" ) @@ -166,23 +165,14 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, return &ErrBadSeed{xerrors.Errorf("seed has changed")} } - ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok) - if err != nil { - return &ErrApi{err} - } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ss) - if err != nil { - return err - } - if *si.CommR != pci.Info.SealedCID { log.Warn("on-chain sealed CID doesn't match!") } ok, err := m.verif.VerifySeal(proof2.SealVerifyInfo{ - SectorID: m.minerSector(si.SectorNumber), + SectorID: m.minerSectorID(si.SectorNumber), SealedCID: pci.Info.SealedCID, - SealProof: spt, + SealProof: pci.Info.SealProof, Proof: proof, Randomness: si.TicketValue, InteractiveRandomness: si.SeedValue, diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 3a5931c8b..cf0be4cd4 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -106,6 +106,7 @@ var fsmPlanners = 
map[SectorState]func(events []statemachine.Event, state *Secto ), PreCommitFailed: planOne( on(SectorRetryPreCommit{}, PreCommitting), + on(SectorRetryPreCommitWait{}, PreCommitWait), on(SectorRetryWaitSeed{}, WaitSeed), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorPreCommitLanded{}, WaitSeed), @@ -125,6 +126,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorRetryPreCommit{}, PreCommitting), on(SectorRetryCommitWait{}, CommitWait), + on(SectorRetrySubmitCommit{}, SubmitCommit), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), on(SectorTicketExpired{}, Removing), @@ -267,7 +269,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta */ - m.stats.updateSector(m.minerSector(state.SectorNumber), state.State) + m.stats.updateSector(m.minerSectorID(state.SectorNumber), state.State) switch state.State { // Happy path @@ -394,6 +396,15 @@ func (m *Sealing) restartSectors(ctx context.Context) error { return xerrors.Errorf("getting the sealing delay: %w", err) } + spt, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("getting current seal proof: %w", err) + } + ssize, err := spt.SectorSize() + if err != nil { + return err + } + m.unsealedInfoMap.lk.Lock() defer m.unsealedInfoMap.lk.Unlock() for _, sector := range trackedSectors { @@ -408,7 +419,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error { // something's funky here, but probably safe to move on log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber) } else { - ui := UnsealedSectorInfo{} + ui := UnsealedSectorInfo{ + ssize: ssize, + } for _, p := range sector.Pieces { if p.DealInfo != nil { ui.numDeals++ @@ -443,6 +456,13 @@ func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, sta } func final(events []statemachine.Event, state *SectorInfo) 
(uint64, error) { + if len(events) > 0 { + if gm, ok := events[0].User.(globalMutator); ok { + gm.applyGlobal(state) + return 1, nil + } + } + return 0, xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events) } diff --git a/extern/storage-sealing/garbage.go b/extern/storage-sealing/garbage.go index caf371806..c3b282d79 100644 --- a/extern/storage-sealing/garbage.go +++ b/extern/storage-sealing/garbage.go @@ -6,9 +6,10 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" ) -func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { +func (m *Sealing) pledgeSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { if len(sizes) == 0 { return nil, nil } @@ -47,20 +48,31 @@ func (m *Sealing) PledgeSector() error { // this, as we run everything here async, and it's cancelled when the // command exits - size := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() + spt, err := m.currentSealProof(ctx) + if err != nil { + log.Errorf("%+v", err) + return + } + + size, err := spt.SectorSize() + if err != nil { + log.Errorf("%+v", err) + return + } sid, err := m.sc.Next() if err != nil { log.Errorf("%+v", err) return } - err = m.sealer.NewSector(ctx, m.minerSector(sid)) + sectorID := m.minerSector(spt, sid) + err = m.sealer.NewSector(ctx, sectorID) if err != nil { log.Errorf("%+v", err) return } - pieces, err := m.pledgeSector(ctx, m.minerSector(sid), []abi.UnpaddedPieceSize{}, size) + pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, abi.PaddedPieceSize(size).Unpadded()) if err != nil { log.Errorf("%+v", err) return @@ -74,7 +86,7 @@ func (m *Sealing) PledgeSector() error { } } - if err := m.newSectorCC(sid, ps); err != nil 
{ + if err := m.newSectorCC(ctx, sid, ps); err != nil { log.Errorf("%+v", err) return } diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index d9953eee0..5211f8bbe 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -8,13 +8,15 @@ import ( "sync" "time" - "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/go-address" padreader "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" @@ -53,6 +55,7 @@ type SealingAPI interface { StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) @@ -105,6 +108,7 @@ type UnsealedSectorInfo struct { // stored should always equal sum of pieceSizes.Padded() stored abi.PaddedPieceSize pieceSizes []abi.UnpaddedPieceSize + ssize abi.SectorSize } func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, 
notifee SectorStateNotifee) *Sealing { @@ -151,19 +155,30 @@ func (m *Sealing) Run(ctx context.Context) error { func (m *Sealing) Stop(ctx context.Context) error { return m.sectors.Stop(ctx) } + func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid) if (padreader.PaddedSize(uint64(size))) != size { return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") } - if size > abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() { + sp, err := m.currentSealProof(ctx) + if err != nil { + return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) + } + + ssize, err := sp.SectorSize() + if err != nil { + return 0, 0, err + } + + if size > abi.PaddedPieceSize(ssize).Unpadded() { return 0, 0, xerrors.Errorf("piece cannot fit into a sector") } m.unsealedInfoMap.lk.Lock() - sid, pads, err := m.getSectorAndPadding(size) + sid, pads, err := m.getSectorAndPadding(ctx, size) if err != nil { m.unsealedInfoMap.lk.Unlock() return 0, 0, xerrors.Errorf("getting available sector: %w", err) @@ -185,7 +200,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec return 0, 0, xerrors.Errorf("adding piece to sector: %w", err) } - startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(m.sealer.SectorSize()) + startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(ssize) m.unsealedInfoMap.lk.Unlock() @@ -201,7 +216,16 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec // Caller should hold m.unsealedInfoMap.lk func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error { log.Infof("Adding piece to sector %d", sectorID) - ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), 
m.minerSector(sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r) + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("getting current seal proof type: %w", err) + } + ssize, err := sp.SectorSize() + if err != nil { + return err + } + + ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sp, sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r) if err != nil { return xerrors.Errorf("writing piece: %w", err) } @@ -224,6 +248,7 @@ func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size numDeals: num, stored: ui.stored + piece.Piece.Size, pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()), + ssize: ssize, } return nil @@ -257,16 +282,16 @@ func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error { } // Caller should hold m.unsealedInfoMap.lk -func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) { - ss := abi.PaddedPieceSize(m.sealer.SectorSize()) +func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) { for k, v := range m.unsealedInfoMap.infos { pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded()) - if v.stored+size.Padded()+padLength <= ss { + + if v.stored+size.Padded()+padLength <= abi.PaddedPieceSize(v.ssize) { return k, pads, nil } } - ns, err := m.newDealSector() + ns, ssize, err := m.newDealSector(ctx) if err != nil { return 0, nil, err } @@ -275,23 +300,24 @@ func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNum numDeals: 0, stored: 0, pieceSizes: nil, + ssize: ssize, } return ns, nil, nil } // newDealSector creates a new sector for deal storage -func (m *Sealing) newDealSector() (abi.SectorNumber, error) { +func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) { // First make sure we don't 
have too many 'open' sectors cfg, err := m.getConfig() if err != nil { - return 0, xerrors.Errorf("getting config: %w", err) + return 0, 0, xerrors.Errorf("getting config: %w", err) } if cfg.MaxSealingSectorsForDeals > 0 { if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals { - return 0, ErrTooManySectorsSealing + return 0, 0, ErrTooManySectorsSealing } } @@ -338,36 +364,36 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) { } } + spt, err := m.currentSealProof(ctx) + if err != nil { + return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) + } + // Now actually create a new sector sid, err := m.sc.Next() if err != nil { - return 0, xerrors.Errorf("getting sector number: %w", err) + return 0, 0, xerrors.Errorf("getting sector number: %w", err) } - err = m.sealer.NewSector(context.TODO(), m.minerSector(sid)) + err = m.sealer.NewSector(context.TODO(), m.minerSector(spt, sid)) if err != nil { - return 0, xerrors.Errorf("initializing sector: %w", err) - } - - rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize()) - if err != nil { - return 0, xerrors.Errorf("bad sector size: %w", err) + return 0, 0, xerrors.Errorf("initializing sector: %w", err) } log.Infof("Creating sector %d", sid) err = m.sectors.Send(uint64(sid), SectorStart{ ID: sid, - SectorType: rt, + SectorType: spt, }) if err != nil { - return 0, xerrors.Errorf("starting the sector fsm: %w", err) + return 0, 0, xerrors.Errorf("starting the sector fsm: %w", err) } cf, err := m.getConfig() if err != nil { - return 0, xerrors.Errorf("getting the sealing delay: %w", err) + return 0, 0, xerrors.Errorf("getting the sealing delay: %w", err) } if cf.WaitDealsDelay > 0 { @@ -380,25 +406,42 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) { }() } - return sid, nil + ssize, err := spt.SectorSize() + return sid, ssize, err } // newSectorCC accepts a slice of pieces with no deal (junk data) -func (m *Sealing) newSectorCC(sid abi.SectorNumber, pieces []Piece) 
error { - rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize()) +func (m *Sealing) newSectorCC(ctx context.Context, sid abi.SectorNumber, pieces []Piece) error { + spt, err := m.currentSealProof(ctx) if err != nil { - return xerrors.Errorf("bad sector size: %w", err) + return xerrors.Errorf("getting current seal proof type: %w", err) } log.Infof("Creating CC sector %d", sid) return m.sectors.Send(uint64(sid), SectorStartCC{ ID: sid, Pieces: pieces, - SectorType: rt, + SectorType: spt, }) } -func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID { +func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { + mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil) + if err != nil { + return 0, err + } + + return mi.SealProofType, nil +} + +func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef { + return storage.SectorRef{ + ID: m.minerSectorID(num), + ProofType: spt, + } +} + +func (m *Sealing) minerSectorID(num abi.SectorNumber) abi.SectorID { mid, err := address.IDFromAddress(m.maddr) if err != nil { panic(err) diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index b583701ae..760afc0ba 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -77,6 +77,34 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI return nil } + if sector.PreCommitMessage != nil { + mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.PreCommitMessage) + if err != nil { + // API error + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetryPreCommitWait{}) + } + + if mw == nil { + // API error in precommit + return ctx.Send(SectorRetryPreCommitWait{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + // API error in PreCommitWait + return ctx.Send(SectorRetryPreCommitWait{}) + case exitcode.SysErrOutOfGas: + // API 
error in PreCommitWait AND gas estimator guessed a wrong number in PreCommit + return ctx.Send(SectorRetryPreCommit{}) + default: + // something else went wrong + } + } + if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil { switch err.(type) { case *ErrApi: @@ -160,6 +188,34 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo return nil } + if sector.CommitMessage != nil { + mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.CommitMessage) + if err != nil { + // API error + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetryCommitWait{}) + } + + if mw == nil { + // API error in commit + return ctx.Send(SectorRetryCommitWait{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + // API error in CommitWait + return ctx.Send(SectorRetryCommitWait{}) + case exitcode.SysErrOutOfGas: + // API error in CommitWait AND gas estimator guessed a wrong number in SubmitCommit + return ctx.Send(SectorRetrySubmitCommit{}) + default: + // something else went wrong + } + } + if err := checkPrecommit(ctx.Context(), m.maddr, sector, tok, height, m.api); err != nil { switch err.(type) { case *ErrApi: diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index 6684c714d..de7e6c8d0 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -32,7 +32,7 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf } func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error { - if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorNumber)); err != nil { + if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { return ctx.Send(SectorRemoveFailed{err}) } diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 
a1aee4cde..fca4a8699 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -31,7 +31,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err allocated += piece.Piece.Size.Unpadded() } - ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return err + } + + ubytes := abi.PaddedPieceSize(ssize).Unpadded() if allocated > ubytes { return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes) @@ -46,7 +51,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber) } - fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) + fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) 
if err != nil { return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err) } @@ -148,7 +153,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) // process has just restarted and the worker had the result ready) } - pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) + pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) if err != nil { return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)}) } @@ -159,7 +164,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error { - cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.PreCommit1Out) + cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out) if err != nil { return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)}) } @@ -386,12 +391,12 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) Unsealed: *sector.CommD, Sealed: *sector.CommR, } - c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids) + c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids) if err != nil { return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(1): %w", err)}) } - proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), 
m.minerSector(sector.SectorNumber), c2in) + proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in) if err != nil { return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)}) } @@ -492,7 +497,7 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error { // TODO: Maybe wait for some finality - if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil { + if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil { return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } @@ -503,7 +508,7 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf // TODO: track sector health / expiration log.Infof("Proving sector %d", sector.SectorNumber) - if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil { + if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil { log.Error(err) } diff --git a/gen/main.go b/gen/main.go index c2a6d009b..9009172b9 100644 --- a/gen/main.go +++ b/gen/main.go @@ -4,6 +4,8 @@ import ( "fmt" "os" + "github.com/filecoin-project/lotus/chain/market" + gen "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/api" @@ -67,6 +69,14 @@ func main() { os.Exit(1) } + err = gen.WriteTupleEncodersToFile("./chain/market/cbor_gen.go", "market", + market.FundedAddressState{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = gen.WriteTupleEncodersToFile("./chain/exchange/cbor_gen.go", 
"exchange", exchange.Request{}, exchange.Response{}, diff --git a/go.mod b/go.mod index 723640930..60c9c7586 100644 --- a/go.mod +++ b/go.mod @@ -23,30 +23,30 @@ require ( github.com/elastic/go-sysinfo v1.3.0 github.com/fatih/color v1.9.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f - github.com/filecoin-project/go-address v0.0.4 + github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect - github.com/filecoin-project/go-bitfield v0.2.1 + github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v0.9.0 + github.com/filecoin-project/go-data-transfer v1.2.0 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - github.com/filecoin-project/go-fil-markets v1.0.0 + github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 - github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f + github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.0 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/specs-actors v0.9.12 - github.com/filecoin-project/specs-actors/v2 v2.2.0 - github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 + 
github.com/filecoin-project/specs-actors v0.9.13 + github.com/filecoin-project/specs-actors/v2 v2.3.2 + github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect - github.com/google/uuid v1.1.1 + github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 @@ -55,9 +55,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/go-bitswap v0.2.20 + github.com/ipfs/go-bitswap v0.3.2 github.com/ipfs/go-block-format v0.0.2 - github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 + github.com/ipfs/go-blockservice v0.1.4 github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cidutil v0.0.2 github.com/ipfs/go-datastore v0.4.5 @@ -67,8 +67,8 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.3.1 - github.com/ipfs/go-ipfs-blockstore v1.0.1 + github.com/ipfs/go-graphsync v0.5.0 + github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipfs-exchange-interface v0.0.1 @@ -76,35 +76,38 @@ require ( github.com/ipfs/go-ipfs-files v0.0.8 github.com/ipfs/go-ipfs-http-client v0.0.5 github.com/ipfs/go-ipfs-routing v0.1.0 - github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf + github.com/ipfs/go-ipfs-util v0.0.2 + github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-log v1.0.4 github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-metrics-interface v0.0.1 
github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.2.4 github.com/ipfs/interface-go-ipfs-core v0.2.3 github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 - github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f + github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 github.com/kelseyhightower/envconfig v1.4.0 github.com/lib/pq v1.7.0 + github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.11.0 + github.com/libp2p/go-libp2p v0.12.0 github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.6.1 + github.com/libp2p/go-libp2p-core v0.7.0 github.com/libp2p/go-libp2p-discovery v0.5.0 - github.com/libp2p/go-libp2p-kad-dht v0.8.3 - github.com/libp2p/go-libp2p-mplex v0.2.4 + github.com/libp2p/go-libp2p-kad-dht v0.11.0 + github.com/libp2p/go-libp2p-mplex v0.3.0 github.com/libp2p/go-libp2p-noise v0.1.2 github.com/libp2p/go-libp2p-peerstore v0.2.6 - github.com/libp2p/go-libp2p-pubsub v0.3.6 - github.com/libp2p/go-libp2p-quic-transport v0.8.2 + github.com/libp2p/go-libp2p-pubsub v0.4.0 + github.com/libp2p/go-libp2p-quic-transport v0.9.0 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.2.8 + github.com/libp2p/go-libp2p-swarm v0.3.1 github.com/libp2p/go-libp2p-tls v0.1.3 - github.com/libp2p/go-libp2p-yamux v0.2.8 + github.com/libp2p/go-libp2p-yamux v0.4.1 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-colorable v0.1.6 // indirect github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 @@ -132,9 +135,10 @@ require ( go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.5.0 - go.uber.org/zap v1.15.0 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c + go.uber.org/zap v1.16.0 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 + 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 @@ -150,6 +154,4 @@ replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors -replace github.com/supranational/blst => ./extern/fil-blst/blst - -replace github.com/filecoin-project/fil-blst => ./extern/fil-blst +replace github.com/supranational/blst => ./extern/blst diff --git a/go.sum b/go.sum index 3ced99135..542f45b45 100644 --- a/go.sum +++ b/go.sum @@ -233,28 +233,30 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w= -github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb h1:Cbu7YYsXHtVlPEJ+eqbBx2S3ElmWCB0NjpGPYvvvCrA= +github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= 
github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-bitfield v0.2.1 h1:S6Uuqcspqu81sWJ0He4OAfFLm1tSwPdVjtKTkl5m/xQ= -github.com/filecoin-project/go-bitfield v0.2.1/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 h1:RMdzMqe3mu2Z/3N3b9UEfkbGZxukstmZgNC024ybWhA= +github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.9.0 h1:nTT8j7Hu3TM0wRWrGy83/ctawG7sleJGdFWtIsUsKgY= -github.com/filecoin-project/go-data-transfer v0.9.0/go.mod h1:i2CqUy7TMQGKukj9BgqIxiP8nDHDXU2VLd771KVaCaQ= +github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno= +github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= +github.com/filecoin-project/go-data-transfer v1.2.0 h1:LM+K+J+y9t8e3gYskJHWDlyHJsF6aaxoHOP+HIiVE1U= +github.com/filecoin-project/go-data-transfer v1.2.0/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= 
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-markets v1.0.0 h1:np9+tlnWXh9xYG4oZfha6HZFLYOaAZoMGR3V4w6DM48= -github.com/filecoin-project/go-fil-markets v1.0.0/go.mod h1:lXExJyYHwpMMddCqhEdNrc7euYJKNkp04K76NZqJLGg= +github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 h1:DF8eu0WdEBnSVdu71+jfT4YMk6fO7AIJk2ZiWd3l15c= +github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= @@ -273,8 +275,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ= -github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod 
h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= @@ -284,11 +286,13 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/ github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= -github.com/filecoin-project/specs-actors/v2 v2.2.0 h1:IyCICb0NHYeD0sdSqjVGwWydn/7r7xXuxdpvGAcRCGY= -github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc= -github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk= -github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v2 v2.3.2 h1:2Vcf4CGa29kRh4JJ02m+FbvD/p3YNnLGsaHfw7Uj49g= +github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= +github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= +github.com/filecoin-project/specs-storage 
v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -399,6 +403,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -486,8 +492,8 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3 github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc= -github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= +github.com/ipfs/go-bitswap v0.3.2 h1:TdKx7lpidYe2dMAKfdeNS26y6Pc/AZX/i8doI1GV210= +github.com/ipfs/go-bitswap v0.3.2/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w= github.com/ipfs/go-block-format v0.0.1/go.mod 
h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= @@ -497,6 +503,8 @@ github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7s github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -547,10 +555,12 @@ github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPi github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= -github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= -github.com/ipfs/go-graphsync v0.3.1 h1:dJLYrck4oyJDfMVhGEKiWHxaY8oYMWko4m2Fi+4bofo= -github.com/ipfs/go-graphsync v0.3.1/go.mod h1:bw4LiLM5Oq/uLdzEtih9LK8GrwSijv+XqYiWCTxHMqs= +github.com/ipfs/go-graphsync v0.4.2 h1:Y/jt5r619yj0LI7OLtGKh4jYm8goYUcuJ09y7TZ3zMo= +github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= 
+github.com/ipfs/go-graphsync v0.4.3 h1:2t+oCpufufs1oqChoWiIK7V5uC1XCtf06PK9nqMV6pM= +github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= +github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM= +github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= @@ -559,6 +569,8 @@ github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86 github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M= +github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -603,8 +615,8 @@ github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf h1:PRCy+w3GocY77CBEwTprp6hn7PLiEU1YToKe7B+1FVk= 
-github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= @@ -662,9 +674,13 @@ github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= +github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 
v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -747,6 +763,8 @@ github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwn github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= @@ -775,8 +793,10 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE= -github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE= +github.com/libp2p/go-libp2p v0.12.0 h1:+xai9RQnQ9l5elFOKvp5wRyjyWisSwEx+6nU2+onpUA= +github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod 
h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= @@ -785,8 +805,8 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= -github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= +github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= @@ -804,8 +824,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3 github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= -github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod 
h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= +github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= @@ -834,8 +854,11 @@ github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ= github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI= @@ -853,11 +876,11 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= 
github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.8.3 h1:ceK5ML6s/I8UAcw6veoNsuEHdHvfo88leU/5uWOIFWs= -github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= +github.com/libp2p/go-libp2p-kad-dht v0.11.0 h1:ZLhlmDKsFiOkPhTzfEqBrMy/1Tqx+Dk6UgbHM5//IQM= +github.com/libp2p/go-libp2p-kad-dht v0.11.0/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= -github.com/libp2p/go-libp2p-kbucket v0.4.2 h1:wg+VPpCtY61bCasGRexCuXOmEmdKjN+k1w+JtTwu9gA= -github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= +github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= +github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= @@ -867,8 +890,8 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo= -github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= +github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk= +github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod 
h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -885,6 +908,7 @@ github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6x github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= @@ -904,12 +928,12 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1 github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.3.6 h1:9oO8W7qIWCYQYyz5z8nUsPcb3rrFehBlkbqvbSVjBxY= -github.com/libp2p/go-libp2p-pubsub v0.3.6/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI= +github.com/libp2p/go-libp2p-pubsub v0.4.0 h1:YNVRyXqBgv9i4RG88jzoTtkSOaSB45CqHkL29NNBZb4= +github.com/libp2p/go-libp2p-pubsub v0.4.0/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod 
h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.8.2 h1:FDaXBCBJ1e5hY6gnWEJ4NbYyLk8eezr4J6AY3q3KqwM= -github.com/libp2p/go-libp2p-quic-transport v0.8.2/go.mod h1:L+e0q15ZNaYm3seHgbsXjWP8kXLEqz+elLWKk9l8DhM= +github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= +github.com/libp2p/go-libp2p-quic-transport v0.9.0/go.mod h1:xyY+IgxL0qsW7Kiutab0+NlxM0/p9yRtrGTYsuMWf70= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -936,6 +960,9 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI= +github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -944,6 +971,8 @@ github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.1/go.mod 
h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8 h1:v4dvk7YEW8buwCdIVWnhpv0Hp/AAJKRWIxBhmLRZrsk= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -964,6 +993,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0 h1:qunEZzWwwmfSBYTtSyd81PlD1TjB5uuWcGYHWVXLbUg= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= +github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -976,6 +1009,8 @@ github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6 github.com/libp2p/go-mplex v0.1.1/go.mod 
h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -1038,6 +1073,10 @@ github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0 h1:7nqe0T95T2CWh40IdJ/tp8RMor4ubc9/wYZpB2a/Hx0= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= @@ -1169,8 +1208,8 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE= -github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1408,7 +1447,6 @@ github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8W github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= @@ -1459,8 +1497,8 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1: github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= 
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s= -github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb/go.mod h1:pbNsDSxn1ICiNn9Ct4ZGNrwzfkkwYbx/lw8VuyutFIg= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= @@ -1468,6 +1506,7 @@ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZD github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1519,6 +1558,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap 
v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= @@ -1556,6 +1597,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1630,6 +1672,8 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1647,6 +1691,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1713,11 +1759,15 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1763,6 +1813,8 @@ golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4= +golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1828,8 +1880,11 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1888,8 +1943,13 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbc launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= +modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= +modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= +modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/strutil 
v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= diff --git a/lib/blockstore/badger/blockstore.go b/lib/blockstore/badger/blockstore.go new file mode 100644 index 000000000..fa9d55bdb --- /dev/null +++ b/lib/blockstore/badger/blockstore.go @@ -0,0 +1,427 @@ +package badgerbs + +import ( + "context" + "fmt" + "io" + "sync/atomic" + + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/options" + "github.com/multiformats/go-base32" + "go.uber.org/zap" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + logger "github.com/ipfs/go-log/v2" + pool "github.com/libp2p/go-buffer-pool" + + "github.com/filecoin-project/lotus/lib/blockstore" +) + +var ( + // KeyPool is the buffer pool we use to compute storage keys. + KeyPool *pool.BufferPool = pool.GlobalPool +) + +var ( + // ErrBlockstoreClosed is returned from blockstore operations after + // the blockstore has been closed. + ErrBlockstoreClosed = fmt.Errorf("badger blockstore closed") + + log = logger.Logger("badgerbs") +) + +// aliases to mask badger dependencies. +const ( + // FileIO is equivalent to badger/options.FileIO. + FileIO = options.FileIO + // MemoryMap is equivalent to badger/options.MemoryMap. + MemoryMap = options.MemoryMap + // LoadToRAM is equivalent to badger/options.LoadToRAM. + LoadToRAM = options.LoadToRAM +) + +// Options embeds the badger options themselves, and augments them with +// blockstore-specific options. +type Options struct { + badger.Options + + // Prefix is an optional prefix to prepend to keys. Default: "". + Prefix string +} + +func DefaultOptions(path string) Options { + return Options{ + Options: badger.DefaultOptions(path), + Prefix: "", + } +} + +// badgerLogger is a local wrapper for go-log to make the interface +// compatible with badger.Logger (namely, aliasing Warnf to Warningf) +type badgerLogger struct { + *zap.SugaredLogger // skips 1 caller to get useful line info, skipping over badger.Options. 
	skip2 *zap.SugaredLogger // skips 2 callers, just like above + this logger.
}

// Warningf is required by the badger logger APIs; badger's Logger interface
// uses Warningf where zap exposes Warnf, so we forward accordingly.
func (b *badgerLogger) Warningf(format string, args ...interface{}) {
	b.skip2.Warnf(format, args...)
}

// Lifecycle states held in Blockstore.state; transitioned only via
// sync/atomic operations.
const (
	stateOpen int64 = iota
	stateClosing
	stateClosed
)

// Blockstore is a badger-backed IPLD blockstore.
//
// NOTE: once Close() is called, methods will try their best to return
// ErrBlockstoreClosed. This is guaranteed to happen for all subsequent
// operation calls after Close() has returned, but it may not happen for
// operations in progress. Those are likely to fail with a different error.
type Blockstore struct {
	DB *badger.DB

	// state is guarded by atomic; holds stateOpen, stateClosing or stateClosed.
	state int64

	// prefixing is true when Options.Prefix was non-empty; prefix/prefixLen
	// cache the prefix bytes and their length for key construction.
	prefixing bool
	prefix    []byte
	prefixLen int
}

var _ blockstore.Blockstore = (*Blockstore)(nil)
var _ blockstore.Viewer = (*Blockstore)(nil)
var _ io.Closer = (*Blockstore)(nil)

// Open creates a new badger-backed blockstore, with the supplied options.
func Open(opts Options) (*Blockstore, error) {
	// Route badger's own logging through our go-log logger, with caller-skip
	// set so the reported file/line points at the badger call site rather
	// than this wrapper.
	opts.Logger = &badgerLogger{
		SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
		skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
	}

	db, err := badger.Open(opts.Options)
	if err != nil {
		return nil, fmt.Errorf("failed to open badger blockstore: %w", err)
	}

	bs := &Blockstore{
		DB: db,
	}

	if p := opts.Prefix; p != "" {
		bs.prefixing = true
		bs.prefix = []byte(p)
		bs.prefixLen = len(bs.prefix)
	}

	return bs, nil
}

// Close closes the store. If the store has already been closed, this noops and
// returns no error, even if the first closure resulted in error.
+func (b *Blockstore) Close() error { + if !atomic.CompareAndSwapInt64(&b.state, stateOpen, stateClosing) { + return nil + } + + defer atomic.StoreInt64(&b.state, stateClosed) + return b.DB.Close() +} + +// View implements blockstore.Viewer, which leverages zero-copy read-only +// access to values. +func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + return b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + return item.Value(fn) + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to view block from badger blockstore: %w", err) + } + }) +} + +// Has implements Blockstore.Has. +func (b *Blockstore) Has(cid cid.Cid) (bool, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return false, ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + err := b.DB.View(func(txn *badger.Txn) error { + _, err := txn.Get(k) + return err + }) + + switch err { + case badger.ErrKeyNotFound: + return false, nil + case nil: + return true, nil + default: + return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err) + } +} + +// Get implements Blockstore.Get. 
+func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { + if !cid.Defined() { + return nil, blockstore.ErrNotFound + } + + if atomic.LoadInt64(&b.state) != stateOpen { + return nil, ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + var val []byte + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + val, err = item.ValueCopy(nil) + return err + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to get block from badger blockstore: %w", err) + } + }) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(val, cid) +} + +// GetSize implements Blockstore.GetSize. +func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return -1, ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + var size int + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + size = int(item.ValueSize()) + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to get block size from badger blockstore: %w", err) + } + return nil + }) + if err != nil { + size = -1 + } + return size, err +} + +// Put implements Blockstore.Put. +func (b *Blockstore) Put(block blocks.Block) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(block.Cid()) + if pooled { + defer KeyPool.Put(k) + } + + err := b.DB.Update(func(txn *badger.Txn) error { + return txn.Set(k, block.RawData()) + }) + if err != nil { + err = fmt.Errorf("failed to put block in badger blockstore: %w", err) + } + return err +} + +// PutMany implements Blockstore.PutMany. 
+func (b *Blockstore) PutMany(blocks []blocks.Block) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + batch := b.DB.NewWriteBatch() + defer batch.Cancel() + + // toReturn tracks the byte slices to return to the pool, if we're using key + // prefixing. we can't return each slice to the pool after each Set, because + // badger holds on to the slice. + var toReturn [][]byte + if b.prefixing { + toReturn = make([][]byte, 0, len(blocks)) + defer func() { + for _, b := range toReturn { + KeyPool.Put(b) + } + }() + } + + for _, block := range blocks { + k, pooled := b.PooledStorageKey(block.Cid()) + if pooled { + toReturn = append(toReturn, k) + } + if err := batch.Set(k, block.RawData()); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err) + } + return err +} + +// DeleteBlock implements Blockstore.DeleteBlock. +func (b *Blockstore) DeleteBlock(cid cid.Cid) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + return b.DB.Update(func(txn *badger.Txn) error { + return txn.Delete(k) + }) +} + +// AllKeysChan implements Blockstore.AllKeysChan. +func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return nil, ErrBlockstoreClosed + } + + txn := b.DB.NewTransaction(false) + opts := badger.IteratorOptions{PrefetchSize: 100} + if b.prefixing { + opts.Prefix = b.prefix + } + iter := txn.NewIterator(opts) + + ch := make(chan cid.Cid) + go func() { + defer close(ch) + defer iter.Close() + + // NewCidV1 makes a copy of the multihash buffer, so we can reuse it to + // contain allocs. + var buf []byte + for iter.Rewind(); iter.Valid(); iter.Next() { + if ctx.Err() != nil { + return // context has fired. 
+			}
+			if atomic.LoadInt64(&b.state) != stateOpen {
+				// open iterators will run even after the database is closed...
+				return // closing, yield.
+			}
+			k := iter.Item().Key()
+			if b.prefixing {
+				k = k[b.prefixLen:]
+			}
+
+			if reqlen := base32.RawStdEncoding.DecodedLen(len(k)); len(buf) < reqlen {
+				buf = make([]byte, reqlen)
+			}
+			if n, err := base32.RawStdEncoding.Decode(buf, k); err == nil {
+				select {
+				case ch <- cid.NewCidV1(cid.Raw, buf[:n]):
+				case <-ctx.Done():
+					return
+				}
+			} else {
+				log.Warnf("failed to decode key %s in badger AllKeysChan; err: %s", k, err)
+			}
+		}
+	}()
+
+	return ch, nil
+}
+
+// HashOnRead implements Blockstore.HashOnRead. It is not supported by this
+// blockstore.
+func (b *Blockstore) HashOnRead(_ bool) {
+	log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring")
+}
+
+// PooledStorageKey returns the storage key under which this CID is stored.
+//
+// The key is: prefix + base32_no_padding(cid.Hash)
+//
+// This method may return pooled byte slice, which MUST be returned to the
+// KeyPool if pooled=true, or a leak will occur.
+func (b *Blockstore) PooledStorageKey(cid cid.Cid) (key []byte, pooled bool) {
+	h := cid.Hash()
+	size := base32.RawStdEncoding.EncodedLen(len(h))
+	if !b.prefixing { // optimize for branch prediction.
+		k := pool.Get(size)
+		base32.RawStdEncoding.Encode(k, h)
+		return k, true // slicing up to length unnecessary; the pool has already done this.
+	}
+
+	size += b.prefixLen
+	k := pool.Get(size)
+	copy(k, b.prefix)
+	base32.RawStdEncoding.Encode(k[b.prefixLen:], h)
+	return k, true // slicing up to length unnecessary; the pool has already done this.
+}
+
+// StorageKey acts like PooledStorageKey, but attempts to write the storage key
+// into the provided slice. If the slice capacity is insufficient, it allocates
+// a new byte slice with enough capacity to accommodate the result. This method
+// returns the resulting slice.
+func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte { + h := cid.Hash() + reqsize := base32.RawStdEncoding.EncodedLen(len(h)) + b.prefixLen + if reqsize > cap(dst) { + // passed slice is smaller than required size; create new. + dst = make([]byte, reqsize) + } else if reqsize > len(dst) { + // passed slice has enough capacity, but its length is + // restricted, expand. + dst = dst[:cap(dst)] + } + + if b.prefixing { // optimize for branch prediction. + copy(dst, b.prefix) + base32.RawStdEncoding.Encode(dst[b.prefixLen:], h) + } else { + base32.RawStdEncoding.Encode(dst, h) + } + return dst[:reqsize] +} diff --git a/lib/blockstore/badger/blockstore_test.go b/lib/blockstore/badger/blockstore_test.go new file mode 100644 index 000000000..e357117e5 --- /dev/null +++ b/lib/blockstore/badger/blockstore_test.go @@ -0,0 +1,90 @@ +package badgerbs + +import ( + "io/ioutil" + "os" + "testing" + + blocks "github.com/ipfs/go-block-format" + blockstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/stretchr/testify/require" +) + +func TestBadgerBlockstore(t *testing.T) { + (&Suite{ + NewBlockstore: newBlockstore(DefaultOptions), + OpenBlockstore: openBlockstore(DefaultOptions), + }).RunTests(t, "non_prefixed") + + prefixed := func(path string) Options { + opts := DefaultOptions(path) + opts.Prefix = "/prefixed/" + return opts + } + + (&Suite{ + NewBlockstore: newBlockstore(prefixed), + OpenBlockstore: openBlockstore(prefixed), + }).RunTests(t, "prefixed") +} + +func TestStorageKey(t *testing.T) { + bs, _ := newBlockstore(DefaultOptions)(t) + bbs := bs.(*Blockstore) + defer bbs.Close() //nolint:errcheck + + cid1 := blocks.NewBlock([]byte("some data")).Cid() + cid2 := blocks.NewBlock([]byte("more data")).Cid() + cid3 := blocks.NewBlock([]byte("a little more data")).Cid() + require.NotEqual(t, cid1, cid2) // sanity check + require.NotEqual(t, cid2, cid3) // sanity check + + // nil slice; let StorageKey allocate for us. 
+ k1 := bbs.StorageKey(nil, cid1) + require.Len(t, k1, 55) + require.True(t, cap(k1) == len(k1)) + + // k1's backing array is reused. + k2 := bbs.StorageKey(k1, cid2) + require.Len(t, k2, 55) + require.True(t, cap(k2) == len(k1)) + + // bring k2 to len=0, and verify that its backing array gets reused + // (i.e. k1 and k2 are overwritten) + k3 := bbs.StorageKey(k2[:0], cid3) + require.Len(t, k3, 55) + require.True(t, cap(k3) == len(k3)) + + // backing array of k1 and k2 has been modified, i.e. memory is shared. + require.Equal(t, k3, k1) + require.Equal(t, k3, k2) +} + +func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.Blockstore, path string) { + return func(tb testing.TB) (bs blockstore.Blockstore, path string) { + tb.Helper() + + path, err := ioutil.TempDir("", "") + if err != nil { + tb.Fatal(err) + } + + db, err := Open(optsSupplier(path)) + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + return db, path + } +} + +func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) { + return func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) { + tb.Helper() + return Open(optsSupplier(path)) + } +} diff --git a/lib/blockstore/badger/blockstore_test_suite.go b/lib/blockstore/badger/blockstore_test_suite.go new file mode 100644 index 000000000..b11fc4e23 --- /dev/null +++ b/lib/blockstore/badger/blockstore_test_suite.go @@ -0,0 +1,310 @@ +package badgerbs + +import ( + "context" + "fmt" + "io" + "reflect" + "strings" + "testing" + + "github.com/filecoin-project/lotus/lib/blockstore" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + + "github.com/stretchr/testify/require" +) + +// TODO: move this to go-ipfs-blockstore. 
+type Suite struct { + NewBlockstore func(tb testing.TB) (bs blockstore.Blockstore, path string) + OpenBlockstore func(tb testing.TB, path string) (bs blockstore.Blockstore, err error) +} + +func (s *Suite) RunTests(t *testing.T, prefix string) { + v := reflect.TypeOf(s) + f := func(t *testing.T) { + for i := 0; i < v.NumMethod(); i++ { + if m := v.Method(i); strings.HasPrefix(m.Name, "Test") { + f := m.Func.Interface().(func(*Suite, *testing.T)) + t.Run(m.Name, func(t *testing.T) { + f(s, t) + }) + } + } + } + + if prefix == "" { + f(t) + } else { + t.Run(prefix, f) + } +} + +func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + c := cid.NewCidV0(u.Hash([]byte("stuff"))) + bl, err := bs.Get(c) + require.Nil(t, bl) + require.Equal(t, blockstore.ErrNotFound, err) +} + +func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + _, err := bs.Get(cid.Undef) + require.Equal(t, blockstore.ErrNotFound, err) +} + +func (s *Suite) TestPutThenGetBlock(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + fetched, err := bs.Get(orig.Cid()) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) +} + +func (s *Suite) TestHas(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + ok, err := bs.Has(orig.Cid()) + require.NoError(t, err) + require.True(t, ok) + + ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid()) + require.NoError(t, err) + 
require.False(t, ok) +} + +func (s *Suite) TestCidv0v1(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) +} + +func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + block := blocks.NewBlock([]byte("some data")) + missingBlock := blocks.NewBlock([]byte("missingBlock")) + emptyBlock := blocks.NewBlock([]byte{}) + + err := bs.Put(block) + require.NoError(t, err) + + blockSize, err := bs.GetSize(block.Cid()) + require.NoError(t, err) + require.Len(t, block.RawData(), blockSize) + + err = bs.Put(emptyBlock) + require.NoError(t, err) + + emptySize, err := bs.GetSize(emptyBlock.Cid()) + require.NoError(t, err) + require.Zero(t, emptySize) + + missingSize, err := bs.GetSize(missingBlock.Cid()) + require.Equal(t, blockstore.ErrNotFound, err) + require.Equal(t, -1, missingSize) +} + +func (s *Suite) TestAllKeysSimple(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + keys := insertBlocks(t, bs, 100) + + ctx := context.Background() + ch, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + actual := collect(ch) + + require.ElementsMatch(t, keys, actual) +} + +func (s *Suite) TestAllKeysRespectsContext(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + _ = insertBlocks(t, bs, 100) + + ctx, cancel := context.WithCancel(context.Background()) + ch, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + + // consume 2, then cancel context. 
+ v, ok := <-ch + require.NotEqual(t, cid.Undef, v) + require.True(t, ok) + + v, ok = <-ch + require.NotEqual(t, cid.Undef, v) + require.True(t, ok) + + cancel() + + v, ok = <-ch + require.Equal(t, cid.Undef, v) + require.False(t, ok) +} + +func (s *Suite) TestDoubleClose(t *testing.T) { + bs, _ := s.NewBlockstore(t) + c, ok := bs.(io.Closer) + if !ok { + t.SkipNow() + } + require.NoError(t, c.Close()) + require.NoError(t, c.Close()) +} + +func (s *Suite) TestReopenPutGet(t *testing.T) { + bs, path := s.NewBlockstore(t) + c, ok := bs.(io.Closer) + if !ok { + t.SkipNow() + } + + orig := blocks.NewBlock([]byte("some data")) + err := bs.Put(orig) + require.NoError(t, err) + + err = c.Close() + require.NoError(t, err) + + bs, err = s.OpenBlockstore(t, path) + require.NoError(t, err) + + fetched, err := bs.Get(orig.Cid()) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) + + err = bs.(io.Closer).Close() + require.NoError(t, err) +} + +func (s *Suite) TestPutMany(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + blks := []blocks.Block{ + blocks.NewBlock([]byte("foo1")), + blocks.NewBlock([]byte("foo2")), + blocks.NewBlock([]byte("foo3")), + } + err := bs.PutMany(blks) + require.NoError(t, err) + + for _, blk := range blks { + fetched, err := bs.Get(blk.Cid()) + require.NoError(t, err) + require.Equal(t, blk.RawData(), fetched.RawData()) + + ok, err := bs.Has(blk.Cid()) + require.NoError(t, err) + require.True(t, ok) + } + + ch, err := bs.AllKeysChan(context.Background()) + require.NoError(t, err) + + cids := collect(ch) + require.Len(t, cids, 3) +} + +func (s *Suite) TestDelete(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + blks := []blocks.Block{ + blocks.NewBlock([]byte("foo1")), + blocks.NewBlock([]byte("foo2")), + blocks.NewBlock([]byte("foo3")), + } + err 
:= bs.PutMany(blks) + require.NoError(t, err) + + err = bs.DeleteBlock(blks[1].Cid()) + require.NoError(t, err) + + ch, err := bs.AllKeysChan(context.Background()) + require.NoError(t, err) + + cids := collect(ch) + require.Len(t, cids, 2) + require.ElementsMatch(t, cids, []cid.Cid{ + cid.NewCidV1(cid.Raw, blks[0].Cid().Hash()), + cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()), + }) + + has, err := bs.Has(blks[1].Cid()) + require.NoError(t, err) + require.False(t, has) + +} + +func insertBlocks(t *testing.T, bs blockstore.Blockstore, count int) []cid.Cid { + keys := make([]cid.Cid, count) + for i := 0; i < count; i++ { + block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) + err := bs.Put(block) + require.NoError(t, err) + // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what + // the store returns. + keys[i] = cid.NewCidV1(cid.Raw, block.Multihash()) + } + return keys +} + +func collect(ch <-chan cid.Cid) []cid.Cid { + var keys []cid.Cid + for k := range ch { + keys = append(keys, k) + } + return keys +} diff --git a/lib/blockstore/blockstore.go b/lib/blockstore/blockstore.go index 99d849188..eb28f1bf0 100644 --- a/lib/blockstore/blockstore.go +++ b/lib/blockstore/blockstore.go @@ -44,12 +44,9 @@ func NewBlockstore(dstore ds.Batching) blockstore.Blockstore { // Alias so other packages don't have to import go-ipfs-blockstore type Blockstore = blockstore.Blockstore -type GCBlockstore = blockstore.GCBlockstore +type Viewer = blockstore.Viewer type CacheOpts = blockstore.CacheOpts -type GCLocker = blockstore.GCLocker -var NewGCLocker = blockstore.NewGCLocker -var NewGCBlockstore = blockstore.NewGCBlockstore var ErrNotFound = blockstore.ErrNotFound func DefaultCacheOpts() CacheOpts { diff --git a/lib/blockstore/fallbackstore.go b/lib/blockstore/fallbackstore.go new file mode 100644 index 000000000..0ce397d44 --- /dev/null +++ b/lib/blockstore/fallbackstore.go @@ -0,0 +1,95 @@ +package blockstore + +import ( + "context" + "sync" + "time" + + 
"golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("blockstore") + +type FallbackStore struct { + blockstore.Blockstore + + fallbackGetBlock func(context.Context, cid.Cid) (blocks.Block, error) + lk sync.RWMutex +} + +func (fbs *FallbackStore) SetFallback(fg func(context.Context, cid.Cid) (blocks.Block, error)) { + fbs.lk.Lock() + defer fbs.lk.Unlock() + + fbs.fallbackGetBlock = fg +} + +func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { + log.Errorw("fallbackstore: Block not found locally, fetching from the network", "cid", c) + fbs.lk.RLock() + defer fbs.lk.RUnlock() + + if fbs.fallbackGetBlock == nil { + // FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet) + // Wait for a bit and retry + fbs.lk.RUnlock() + time.Sleep(5 * time.Second) + fbs.lk.RLock() + + if fbs.fallbackGetBlock == nil { + log.Errorw("fallbackstore: fallbackGetBlock not configured yet") + return nil, blockstore.ErrNotFound + } + } + + ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second) + defer cancel() + + b, err := fbs.fallbackGetBlock(ctx, c) + if err != nil { + return nil, err + } + + // chain bitswap puts blocks in temp blockstore which is cleaned up + // every few min (to drop any messages we fetched but don't want) + // in this case we want to keep this block around + if err := fbs.Put(b); err != nil { + return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err) + } + return b, nil +} + +func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { + b, err := fbs.Blockstore.Get(c) + switch err { + case nil: + return b, nil + case blockstore.ErrNotFound: + return fbs.getFallback(c) + default: + return b, err + } +} + +func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { + sz, err := fbs.Blockstore.GetSize(c) + switch err { + case nil: + return sz, nil 
+ case blockstore.ErrNotFound: + b, err := fbs.getFallback(c) + if err != nil { + return 0, err + } + return len(b.RawData()), nil + default: + return sz, err + } +} + +var _ blockstore.Blockstore = &FallbackStore{} diff --git a/lib/blockstore/memstore.go b/lib/blockstore/memstore.go index 9745d6f03..5cfaf40a9 100644 --- a/lib/blockstore/memstore.go +++ b/lib/blockstore/memstore.go @@ -5,23 +5,33 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" ) +// MemStore is a terminal blockstore that keeps blocks in memory. type MemStore map[cid.Cid]blocks.Block func (m MemStore) DeleteBlock(k cid.Cid) error { delete(m, k) return nil } + func (m MemStore) Has(k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } + +func (m MemStore) View(k cid.Cid, callback func([]byte) error) error { + b, ok := m[k] + if !ok { + return ErrNotFound + } + return callback(b.RawData()) +} + func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { - return nil, blockstore.ErrNotFound + return nil, ErrNotFound } return b, nil } @@ -30,7 +40,7 @@ func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { func (m MemStore) GetSize(k cid.Cid) (int, error) { b, ok := m[k] if !ok { - return 0, blockstore.ErrNotFound + return 0, ErrNotFound } return len(b.RawData()), nil } diff --git a/lib/blockstore/syncstore.go b/lib/blockstore/syncstore.go index be9f6b5c4..86786a0c4 100644 --- a/lib/blockstore/syncstore.go +++ b/lib/blockstore/syncstore.go @@ -8,6 +8,8 @@ import ( "github.com/ipfs/go-cid" ) +// SyncStore is a terminal blockstore that is a synchronized version +// of MemStore. type SyncStore struct { mu sync.RWMutex bs MemStore // specifically use a memStore to save indirection overhead. 
@@ -18,11 +20,20 @@ func (m *SyncStore) DeleteBlock(k cid.Cid) error { defer m.mu.Unlock() return m.bs.DeleteBlock(k) } + func (m *SyncStore) Has(k cid.Cid) (bool, error) { m.mu.RLock() defer m.mu.RUnlock() return m.bs.Has(k) } + +func (m *SyncStore) View(k cid.Cid, callback func([]byte) error) error { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.bs.View(k, callback) +} + func (m *SyncStore) Get(k cid.Cid) (blocks.Block, error) { m.mu.RLock() defer m.mu.RUnlock() diff --git a/lib/bufbstore/buf_bstore.go b/lib/bufbstore/buf_bstore.go index 4ea746444..5b21ace5b 100644 --- a/lib/bufbstore/buf_bstore.go +++ b/lib/bufbstore/buf_bstore.go @@ -16,6 +16,9 @@ var log = logging.Logger("bufbs") type BufferedBS struct { read bstore.Blockstore write bstore.Blockstore + + readviewer bstore.Viewer + writeviewer bstore.Viewer } func NewBufferedBstore(base bstore.Blockstore) *BufferedBS { @@ -27,10 +30,20 @@ func NewBufferedBstore(base bstore.Blockstore) *BufferedBS { buf = bstore.NewTemporary() } - return &BufferedBS{ + bs := &BufferedBS{ read: base, write: buf, } + if v, ok := base.(bstore.Viewer); ok { + bs.readviewer = v + } + if v, ok := buf.(bstore.Viewer); ok { + bs.writeviewer = v + } + if (bs.writeviewer == nil) != (bs.readviewer == nil) { + log.Warnf("one of the stores is not viewable; running less efficiently") + } + return bs } func NewTieredBstore(r bstore.Blockstore, w bstore.Blockstore) *BufferedBS { @@ -40,7 +53,8 @@ func NewTieredBstore(r bstore.Blockstore, w bstore.Blockstore) *BufferedBS { } } -var _ (bstore.Blockstore) = &BufferedBS{} +var _ bstore.Blockstore = (*BufferedBS)(nil) +var _ bstore.Viewer = (*BufferedBS)(nil) func (bs *BufferedBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { a, err := bs.read.AllKeysChan(ctx) @@ -93,8 +107,27 @@ func (bs *BufferedBS) DeleteBlock(c cid.Cid) error { return bs.write.DeleteBlock(c) } +func (bs *BufferedBS) View(c cid.Cid, callback func([]byte) error) error { + if bs.writeviewer == nil || bs.readviewer 
== nil { + // one of the stores isn't Viewer; fall back to pure Get behaviour. + blk, err := bs.Get(c) + if err != nil { + return err + } + return callback(blk.RawData()) + } + + // both stores are viewable. + if err := bs.writeviewer.View(c, callback); err == bstore.ErrNotFound { + // not found in write blockstore; fall through. + } else { + return err // propagate errors, or nil, i.e. found. + } + return bs.readviewer.View(c, callback) +} + func (bs *BufferedBS) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.read.Get(c); err != nil { + if out, err := bs.write.Get(c); err != nil { if err != bstore.ErrNotFound { return nil, err } @@ -102,7 +135,7 @@ func (bs *BufferedBS) Get(c cid.Cid) (block.Block, error) { return out, nil } - return bs.write.Get(c) + return bs.read.Get(c) } func (bs *BufferedBS) GetSize(c cid.Cid) (int, error) { @@ -115,7 +148,7 @@ func (bs *BufferedBS) GetSize(c cid.Cid) (int, error) { } func (bs *BufferedBS) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) + has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check if err != nil { return err } @@ -128,7 +161,7 @@ func (bs *BufferedBS) Put(blk block.Block) error { } func (bs *BufferedBS) Has(c cid.Cid) (bool, error) { - has, err := bs.read.Has(c) + has, err := bs.write.Has(c) if err != nil { return false, err } @@ -136,7 +169,7 @@ func (bs *BufferedBS) Has(c cid.Cid) (bool, error) { return true, nil } - return bs.write.Has(c) + return bs.read.Has(c) } func (bs *BufferedBS) HashOnRead(hor bool) { diff --git a/lib/cachebs/cachebs.go b/lib/cachebs/cachebs.go deleted file mode 100644 index 046f100c0..000000000 --- a/lib/cachebs/cachebs.go +++ /dev/null @@ -1,89 +0,0 @@ -package cachebs - -import ( - "context" - - lru "github.com/hashicorp/golang-lru" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -//nolint:deadcode,varcheck -var log 
= logging.Logger("cachebs") - -type CacheBS struct { - cache *lru.ARCCache - bs bstore.Blockstore -} - -func NewBufferedBstore(base bstore.Blockstore, size int) bstore.Blockstore { - c, err := lru.NewARC(size) - if err != nil { - panic(err) - } - // Wrap this in an ID blockstore to avoid caching blocks inlined into - // CIDs. - return bstore.WrapIDStore(&CacheBS{ - cache: c, - bs: base, - }) -} - -var _ (bstore.Blockstore) = &CacheBS{} - -func (bs *CacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - return bs.bs.AllKeysChan(ctx) -} - -func (bs *CacheBS) DeleteBlock(c cid.Cid) error { - bs.cache.Remove(c) - - return bs.bs.DeleteBlock(c) -} - -func (bs *CacheBS) Get(c cid.Cid) (block.Block, error) { - v, ok := bs.cache.Get(c) - if ok { - return v.(block.Block), nil - } - - out, err := bs.bs.Get(c) - if err != nil { - return nil, err - } - - bs.cache.Add(c, out) - return out, nil -} - -func (bs *CacheBS) GetSize(c cid.Cid) (int, error) { - return bs.bs.GetSize(c) -} - -func (bs *CacheBS) Put(blk block.Block) error { - bs.cache.Add(blk.Cid(), blk) - - return bs.bs.Put(blk) -} - -func (bs *CacheBS) Has(c cid.Cid) (bool, error) { - if bs.cache.Contains(c) { - return true, nil - } - - return bs.bs.Has(c) -} - -func (bs *CacheBS) HashOnRead(hor bool) { - bs.bs.HashOnRead(hor) -} - -func (bs *CacheBS) PutMany(blks []block.Block) error { - for _, blk := range blks { - bs.cache.Add(blk.Cid(), blk) - } - return bs.bs.PutMany(blks) -} diff --git a/lib/peermgr/peermgr.go b/lib/peermgr/peermgr.go index 2f9d34674..2fe54caea 100644 --- a/lib/peermgr/peermgr.go +++ b/lib/peermgr/peermgr.go @@ -191,11 +191,17 @@ func (pmgr *PeerMgr) doExpand(ctx context.Context) { } log.Info("connecting to bootstrap peers") + wg := sync.WaitGroup{} for _, bsp := range pmgr.bootstrappers { - if err := pmgr.h.Connect(ctx, bsp); err != nil { - log.Warnf("failed to connect to bootstrap peer: %s", err) - } + wg.Add(1) + go func(bsp peer.AddrInfo) { + defer wg.Done() + if err := 
pmgr.h.Connect(ctx, bsp); err != nil { + log.Warnf("failed to connect to bootstrap peer: %s", err) + } + }(bsp) } + wg.Wait() return } diff --git a/lotuspond/front/src/chain/send.js b/lotuspond/front/src/chain/send.js index a0fc89437..c0d36b0a3 100644 --- a/lotuspond/front/src/chain/send.js +++ b/lotuspond/front/src/chain/send.js @@ -30,7 +30,7 @@ async function pushMessage(client, from, inmsg) { console.log(inmsg) - await client.call('Filecoin.MpoolPushMessage', [inmsg]) + await client.call('Filecoin.MpoolPushMessage', [inmsg, null]) } export default pushMessage diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go index 674ec4793..440e2a480 100644 --- a/markets/retrievaladapter/provider.go +++ b/markets/retrievaladapter/provider.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-state-types/abi" + specstorage "github.com/filecoin-project/specs-storage/storage" "github.com/ipfs/go-cid" ) @@ -52,9 +53,12 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi return nil, err } - sid := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: sectorID, + ref := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: sectorID, + }, + ProofType: si.SectorType, } r, w := io.Pipe() @@ -63,7 +67,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi if si.CommD != nil { commD = *si.CommD } - err := rpn.sealer.ReadPiece(ctx, w, sid, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD) + err := rpn.sealer.ReadPiece(ctx, w, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD) _ = w.CloseWithError(err) }() diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go index 482183bf9..36fe0d771 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -23,7 
+23,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/market" @@ -38,7 +38,7 @@ type ClientNodeAdapter struct { full.ChainAPI full.MpoolAPI - fm *market.FundMgr + fundmgr *market.FundManager ev *events.Events dsMatcher *dealStateMatcher } @@ -48,16 +48,16 @@ type clientApi struct { full.StateAPI } -func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fm *market.FundMgr) storagemarket.StorageClientNode { +func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode { capi := &clientApi{chain, stateapi} return &ClientNodeAdapter{ StateAPI: stateapi, ChainAPI: chain, MpoolAPI: mpool, - fm: fm, + fundmgr: fundmgr, ev: events.NewEvents(context.TODO(), capi), - dsMatcher: newDealStateMatcher(state.NewStatePredicates(capi)), + dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))), } } @@ -112,8 +112,12 @@ func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, return smsg.Cid(), nil } -func (c *ClientNodeAdapter) EnsureFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount, ts shared.TipSetToken) (cid.Cid, error) { - return c.fm.EnsureAvailable(ctx, addr, wallet, amount) +func (c *ClientNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return c.fundmgr.Reserve(ctx, wallet, addr, amt) +} + +func (c *ClientNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { + return c.fundmgr.Release(addr, amt) } func (c *ClientNodeAdapter) 
GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { @@ -213,105 +217,8 @@ func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, si return big.Mul(bounds.Min, big.NewInt(clientOverestimation)), bounds.Max, nil } -func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealId abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) - - if err != nil { - // TODO: This may be fine for some errors - return false, false, xerrors.Errorf("client: failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch > 0 { - cb(nil) - return true, false, nil - } - - return false, true, nil - } - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() - - if msg == nil { - log.Error("timed out waiting for deal activation... what now?") - return false, nil - } - - sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", dealId, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", dealId, sd.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? 
- return nil - } - - var sectorNumber abi.SectorNumber - var sectorFound bool - matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider { - return false, nil - } - - switch msg.Method { - case miner2.MethodsMiner.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, did := range params.DealIDs { - if did == dealId { - sectorNumber = params.SectorNumber - sectorFound = true - return false, nil - } - } - - return false, nil - case miner2.MethodsMiner.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - if !sectorFound { - return false, nil - } - - if params.SectorNumber != sectorNumber { - return false, nil - } - - return true, nil - default: - return false, nil - } - } - - if err := c.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), events.NoTimeout, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil +func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + return OnDealSectorCommitted(ctx, c, c.ev, provider, dealID, marketactor.DealProposal(proposal), publishCid, cb) } func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { diff --git a/markets/storageadapter/getcurrentdealinfo.go b/markets/storageadapter/getcurrentdealinfo.go new file mode 100644 index 000000000..ab8c3f52f --- /dev/null +++ 
b/markets/storageadapter/getcurrentdealinfo.go @@ -0,0 +1,99 @@ +package storageadapter + +import ( + "bytes" + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type getCurrentDealInfoAPI interface { + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) + StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) +} + +// GetCurrentDealInfo gets current information on a deal, and corrects the deal ID as needed +func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, *api.MarketDeal, error) { + marketDeal, dealErr := api.StateMarketStorageDeal(ctx, dealID, ts.Key()) + if dealErr == nil { + equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal) + if err != nil { + return dealID, nil, err + } + if equal { + return dealID, marketDeal, nil + } + dealErr = xerrors.Errorf("Deal proposals did not match") + } + if publishCid == nil { + return dealID, nil, dealErr + } + // attempt deal id correction + lookup, err := api.StateSearchMsg(ctx, *publishCid) + if err != nil { + return dealID, nil, err + } + + if lookup.Receipt.ExitCode != exitcode.Ok { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", *publishCid, lookup.Receipt.ExitCode) + } + + var retval market.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil { + return dealID, nil, xerrors.Errorf("looking for publish deal 
message: unmarshaling message return: %w", err) + } + + if len(retval.IDs) != 1 { + // market currently only ever sends messages with 1 deal + return dealID, nil, xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal") + } + + if retval.IDs[0] == dealID { + // DealID did not change, so we are stuck with the original lookup error + return dealID, nil, dealErr + } + + dealID = retval.IDs[0] + marketDeal, err = api.StateMarketStorageDeal(ctx, dealID, ts.Key()) + + if err == nil { + equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal) + if err != nil { + return dealID, nil, err + } + if !equal { + return dealID, nil, xerrors.Errorf("Deal proposals did not match") + } + } + return dealID, marketDeal, err +} + +func checkDealEquality(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, p1, p2 market.DealProposal) (bool, error) { + p1ClientID, err := api.StateLookupID(ctx, p1.Client, ts.Key()) + if err != nil { + return false, err + } + p2ClientID, err := api.StateLookupID(ctx, p2.Client, ts.Key()) + if err != nil { + return false, err + } + return p1.PieceCID.Equals(p2.PieceCID) && + p1.PieceSize == p2.PieceSize && + p1.VerifiedDeal == p2.VerifiedDeal && + p1.Label == p2.Label && + p1.StartEpoch == p2.StartEpoch && + p1.EndEpoch == p2.EndEpoch && + p1.StoragePricePerEpoch.Equals(p2.StoragePricePerEpoch) && + p1.ProviderCollateral.Equals(p2.ProviderCollateral) && + p1.ClientCollateral.Equals(p2.ClientCollateral) && + p1.Provider == p2.Provider && + p1ClientID == p2ClientID, nil +} diff --git a/markets/storageadapter/getcurrentdealinfo_test.go b/markets/storageadapter/getcurrentdealinfo_test.go new file mode 100644 index 000000000..ed5d36c5b --- /dev/null +++ b/markets/storageadapter/getcurrentdealinfo_test.go @@ -0,0 +1,263 @@ +package storageadapter + +import ( + "bytes" + "errors" + "math/rand" + "testing" + "time" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + "golang.org/x/xerrors" +) + +var errNotFound = errors.New("Could not find") + +func TestGetCurrentDealInfo(t *testing.T) { + ctx := context.Background() + dummyCid, _ := cid.Parse("bafkqaaa") + startDealID := abi.DealID(rand.Uint64()) + newDealID := abi.DealID(rand.Uint64()) + twoValuesReturn := makePublishDealsReturnBytes(t, []abi.DealID{abi.DealID(rand.Uint64()), abi.DealID(rand.Uint64())}) + sameValueReturn := makePublishDealsReturnBytes(t, []abi.DealID{startDealID}) + newValueReturn := makePublishDealsReturnBytes(t, []abi.DealID{newDealID}) + proposal := market.DealProposal{ + PieceCID: dummyCid, + PieceSize: abi.PaddedPieceSize(rand.Uint64()), + Label: "success", + } + otherProposal := market.DealProposal{ + PieceCID: dummyCid, + PieceSize: abi.PaddedPieceSize(rand.Uint64()), + Label: "other", + } + successDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + otherDeal := &api.MarketDeal{ + Proposal: otherProposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + testCases := map[string]struct { + searchMessageLookup *api.MsgLookup + searchMessageErr error + marketDeals map[abi.DealID]*api.MarketDeal + publishCid *cid.Cid + expectedDealID abi.DealID + expectedMarketDeal *api.MarketDeal + expectedError error + }{ + "deal lookup succeeds": { + marketDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: successDeal, + }, + expectedDealID: startDealID, + expectedMarketDeal: successDeal, + }, + "publish CID = nil": { + expectedDealID: 
startDealID, + expectedError: errNotFound, + }, + "publish CID = nil, other deal on lookup": { + marketDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: otherDeal, + }, + expectedDealID: startDealID, + expectedError: xerrors.Errorf("Deal proposals did not match"), + }, + "search message fails": { + publishCid: &dummyCid, + searchMessageErr: errors.New("something went wrong"), + expectedDealID: startDealID, + expectedError: errors.New("something went wrong"), + }, + "return code not ok": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.ErrIllegalState, + }, + }, + expectedDealID: startDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", dummyCid, exitcode.ErrIllegalState), + }, + "unable to unmarshal params": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: []byte("applesauce"), + }, + }, + expectedDealID: startDealID, + expectedError: xerrors.Errorf("looking for publish deal message: unmarshaling message return: cbor input should be of type array"), + }, + "more than one returned id": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: twoValuesReturn, + }, + }, + expectedDealID: startDealID, + expectedError: xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal"), + }, + "deal ids still match": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: sameValueReturn, + }, + }, + expectedDealID: startDealID, + expectedError: errNotFound, + }, + "new deal id success": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + 
newDealID: successDeal, + }, + expectedDealID: newDealID, + expectedMarketDeal: successDeal, + }, + "new deal id after other deal found": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: otherDeal, + newDealID: successDeal, + }, + expectedDealID: newDealID, + expectedMarketDeal: successDeal, + }, + "new deal id failure": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + expectedDealID: newDealID, + expectedError: errNotFound, + }, + "new deal id, failure due to other deal present": { + publishCid: &dummyCid, + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + newDealID: otherDeal, + }, + expectedDealID: newDealID, + expectedError: xerrors.Errorf("Deal proposals did not match"), + }, + } + runTestCase := func(testCase string, data struct { + searchMessageLookup *api.MsgLookup + searchMessageErr error + marketDeals map[abi.DealID]*api.MarketDeal + publishCid *cid.Cid + expectedDealID abi.DealID + expectedMarketDeal *api.MarketDeal + expectedError error + }) { + t.Run(testCase, func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ts, err := test.MockTipset(address.TestAddress, rand.Uint64()) + require.NoError(t, err) + marketDeals := make(map[marketDealKey]*api.MarketDeal) + for dealID, deal := range data.marketDeals { + marketDeals[marketDealKey{dealID, ts.Key()}] = deal + } + api := &mockGetCurrentDealInfoAPI{ + SearchMessageLookup: data.searchMessageLookup, + SearchMessageErr: data.searchMessageErr, + MarketDeals: marketDeals, + } + + dealID, marketDeal, err := GetCurrentDealInfo(ctx, ts, api, startDealID, 
proposal, data.publishCid) + require.Equal(t, data.expectedDealID, dealID) + require.Equal(t, data.expectedMarketDeal, marketDeal) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, data.expectedError.Error()) + } + }) + } + for testCase, data := range testCases { + runTestCase(testCase, data) + } +} + +type marketDealKey struct { + abi.DealID + types.TipSetKey +} + +type mockGetCurrentDealInfoAPI struct { + SearchMessageLookup *api.MsgLookup + SearchMessageErr error + + MarketDeals map[marketDealKey]*api.MarketDeal +} + +func (mapi *mockGetCurrentDealInfoAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, ts types.TipSetKey) (*api.MarketDeal, error) { + deal, ok := mapi.MarketDeals[marketDealKey{dealID, ts}] + if !ok { + return nil, errNotFound + } + return deal, nil +} + +func (mapi *mockGetCurrentDealInfoAPI) StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) { + return mapi.SearchMessageLookup, mapi.SearchMessageErr +} + +func (mapi *mockGetCurrentDealInfoAPI) StateLookupID(ctx context.Context, addr address.Address, ts types.TipSetKey) (address.Address, error) { + return addr, nil +} + +func makePublishDealsReturnBytes(t *testing.T, dealIDs []abi.DealID) []byte { + buf := new(bytes.Buffer) + dealsReturn := market.PublishStorageDealsReturn{ + IDs: dealIDs, + } + err := dealsReturn.MarshalCBOR(buf) + require.NoError(t, err) + return buf.Bytes() +} diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go new file mode 100644 index 000000000..59e649147 --- /dev/null +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -0,0 +1,134 @@ +package storageadapter + +import ( + "bytes" + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type sectorCommittedEventsAPI interface { + Called(check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error +} + +func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, eventsApi sectorCommittedEventsAPI, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { + newDealID, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) + if err != nil { + // TODO: This may be fine for some errors + return false, false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + dealID = newDealID + + if sd.State.SectorStartEpoch > 0 { + cb(nil) + return true, false, nil + } + + return false, true, nil + } + + var sectorNumber abi.SectorNumber + var sectorFound bool + + called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + defer func() { + if err != nil { + cb(xerrors.Errorf("handling applied event: %w", err)) + } + }() + switch msg.Method { + case miner.Methods.PreCommitSector: + var params miner.SectorPreCommitInfo + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + dealID, _, err = GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) + if err != nil { + return false, err + } + + for _, did := range params.DealIDs { + if did == dealID { + sectorNumber = params.SectorNumber + sectorFound = true + return true, nil + } + 
} + return true, nil + case miner.Methods.ProveCommitSector: + if msg == nil { + log.Error("timed out waiting for deal activation... what now?") + return false, nil + } + + _, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) + if err != nil { + return false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + + if sd.State.SectorStartEpoch < 1 { + return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", dealID, ts.ParentState(), ts.Height()) + } + + log.Infof("Storage deal %d activated at epoch %d", dealID, sd.State.SectorStartEpoch) + + cb(nil) + + return false, nil + default: + return false, nil + } + } + + revert := func(ctx context.Context, ts *types.TipSet) error { + log.Warn("deal activation reverted; TODO: actually handle this!") + // TODO: Just go back to DealSealing? + return nil + } + + matchEvent := func(msg *types.Message) (matched bool, err error) { + if msg.To != provider { + return false, nil + } + + switch msg.Method { + case miner.Methods.PreCommitSector: + return !sectorFound, nil + case miner.Methods.ProveCommitSector: + if !sectorFound { + return false, nil + } + + var params miner.ProveCommitSectorParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + if params.SectorNumber != sectorNumber { + return false, nil + } + + return true, nil + default: + return false, nil + } + + } + + if err := eventsApi.Called(checkFunc, called, revert, int(build.MessageConfidence+1), events.NoTimeout, matchEvent); err != nil { + return xerrors.Errorf("failed to set up called handler: %w", err) + } + + return nil +} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go new file mode 100644 index 000000000..8bab7c3d4 --- /dev/null +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -0,0 +1,372 @@ +package 
storageadapter + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + "testing" + + blocks "github.com/ipfs/go-block-format" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/events" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestOnDealSectorCommitted(t *testing.T) { + provider := address.TestAddress + ctx := context.Background() + publishCid := generateCids(1)[0] + sealedCid := generateCids(1)[0] + pieceCid := generateCids(1)[0] + startDealID := abi.DealID(rand.Uint64()) + newDealID := abi.DealID(rand.Uint64()) + newValueReturn := makePublishDealsReturnBytes(t, []abi.DealID{newDealID}) + sectorNumber := abi.SectorNumber(rand.Uint64()) + proposal := market.DealProposal{ + PieceCID: pieceCid, + PieceSize: abi.PaddedPieceSize(rand.Uint64()), + Label: "success", + } + unfinishedDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: -1, + LastUpdatedEpoch: 2, + }, + } + successDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + testCases := map[string]struct { + searchMessageLookup *api.MsgLookup + searchMessageErr error + checkTsDeals map[abi.DealID]*api.MarketDeal + matchStates []matchState + expectedCBCallCount uint64 + expectedCBError error + expectedError error + }{ + "normal sequence": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: 
makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{startDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + }, + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: successDeal, + }, + }, + }, + expectedCBCallCount: 1, + }, + "deal id changes in called": { + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + newDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{newDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{ + newDealID: unfinishedDeal, + }, + }, + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + deals: map[abi.DealID]*api.MarketDeal{ + newDealID: successDeal, + }, + }, + }, + expectedCBCallCount: 1, + }, + "deal id changes in precommit": { + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{newDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{ + newDealID: unfinishedDeal, + }, + }, + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + 
}), + deals: map[abi.DealID]*api.MarketDeal{ + newDealID: successDeal, + }, + }, + }, + expectedCBCallCount: 1, + }, + "deal id changes in prove-commit": { + searchMessageLookup: &api.MsgLookup{ + Receipt: types.MessageReceipt{ + ExitCode: exitcode.Ok, + Return: newValueReturn, + }, + }, + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{startDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + }, + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + deals: map[abi.DealID]*api.MarketDeal{ + newDealID: successDeal, + }, + }, + }, + expectedCBCallCount: 1, + }, + "prove commit but no sector recorded": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: successDeal, + }, + }, + }, + expectedCBCallCount: 0, + }, + "error on deal in check": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{}, + searchMessageErr: errors.New("something went wrong"), + expectedCBCallCount: 0, + expectedError: errors.New("failed to set up called handler: failed to look up deal on chain: something went wrong"), + }, + "sector start epoch > 0 in check": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: successDeal, + }, + expectedCBCallCount: 1, + }, + "error on deal in pre-commit": { + searchMessageErr: errors.New("something went wrong"), + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: 
makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{startDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{}, + }, + }, + expectedCBCallCount: 1, + expectedCBError: errors.New("handling applied event: something went wrong"), + expectedError: errors.New("failed to set up called handler: something went wrong"), + }, + } + runTestCase := func(testCase string, data struct { + searchMessageLookup *api.MsgLookup + searchMessageErr error + checkTsDeals map[abi.DealID]*api.MarketDeal + matchStates []matchState + expectedCBCallCount uint64 + expectedCBError error + expectedError error + }) { + t.Run(testCase, func(t *testing.T) { + // ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + // defer cancel() + api := &mockGetCurrentDealInfoAPI{ + SearchMessageLookup: data.searchMessageLookup, + SearchMessageErr: data.searchMessageErr, + MarketDeals: make(map[marketDealKey]*api.MarketDeal), + } + checkTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + for dealID, deal := range data.checkTsDeals { + api.MarketDeals[marketDealKey{dealID, checkTs.Key()}] = deal + } + matchMessages := make([]matchMessage, len(data.matchStates)) + for i, ms := range data.matchStates { + matchTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + for dealID, deal := range ms.deals { + api.MarketDeals[marketDealKey{dealID, matchTs.Key()}] = deal + } + matchMessages[i] = matchMessage{ + curH: 5, + msg: ms.msg, + msgReceipt: nil, + ts: matchTs, + } + } + eventsAPI := &fakeEvents{ + Ctx: ctx, + CheckTs: checkTs, + MatchMessages: matchMessages, + } + cbCallCount := uint64(0) + var cbError error + cb := func(err error) { + cbCallCount++ + cbError = err + } + err = OnDealSectorCommitted(ctx, api, eventsAPI, provider, startDealID, proposal, &publishCid, cb) + if data.expectedError == nil { + require.NoError(t, err) + } else { + 
require.EqualError(t, err, data.expectedError.Error()) + } + require.Equal(t, data.expectedCBCallCount, cbCallCount) + if data.expectedCBError == nil { + require.NoError(t, cbError) + } else { + require.EqualError(t, cbError, data.expectedCBError.Error()) + } + }) + } + for testCase, data := range testCases { + runTestCase(testCase, data) + } +} + +type matchState struct { + msg *types.Message + deals map[abi.DealID]*api.MarketDeal +} + +type matchMessage struct { + curH abi.ChainEpoch + msg *types.Message + msgReceipt *types.MessageReceipt + ts *types.TipSet + doesRevert bool +} +type fakeEvents struct { + Ctx context.Context + CheckTs *types.TipSet + MatchMessages []matchMessage +} + +func (fe *fakeEvents) Called(check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error { + _, more, err := check(fe.CheckTs) + if err != nil { + return err + } + if !more { + return nil + } + for _, matchMessage := range fe.MatchMessages { + matched, err := mf(matchMessage.msg) + if err != nil { + return err + } + if matched { + more, err := msgHnd(matchMessage.msg, matchMessage.msgReceipt, matchMessage.ts, matchMessage.curH) + if err != nil { + return err + } + if matchMessage.doesRevert { + err := rev(fe.Ctx, matchMessage.ts) + if err != nil { + return err + } + } + if !more { + return nil + } + } + } + return nil +} + +func makeMessage(t *testing.T, to address.Address, method abi.MethodNum, params cbor.Marshaler) *types.Message { + buf := new(bytes.Buffer) + err := params.MarshalCBOR(buf) + require.NoError(t, err) + return &types.Message{ + To: to, + Method: method, + Params: buf.Bytes(), + } +} + +var seq int + +func generateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid() + seq++ + cids = append(cids, c) + } + return cids +} diff --git a/markets/storageadapter/provider.go 
b/markets/storageadapter/provider.go index 82c3a278f..cfff6cfe1 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -3,7 +3,6 @@ package storageadapter // this file implements storagemarket.StorageProviderNode import ( - "bytes" "context" "io" "time" @@ -25,7 +24,6 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/types" @@ -62,7 +60,7 @@ func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDA dag: dag, secb: secb, ev: events.NewEvents(context.TODO(), full), - dsMatcher: newDealStateMatcher(state.NewStatePredicates(full)), + dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(full))), } if fc != nil { na.publishSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxPublishDealsFee)} @@ -169,6 +167,19 @@ func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner a return mi.Worker, nil } +func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, miner address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return 0, err + } + + mi, err := n.StateMinerInfo(ctx, miner, tsk) + if err != nil { + return 0, err + } + return mi.SealProofType, nil +} + func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK) if err != nil { @@ -182,8 +193,12 @@ func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Addr return localSignature, nil } -func (n *ProviderNodeAdapter) EnsureFunds(ctx context.Context, addr, wallet 
address.Address, amt abi.TokenAmount, encodedTs shared.TipSetToken) (cid.Cid, error) { - return n.MarketEnsureAvailable(ctx, addr, wallet, amt) +func (n *ProviderNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return n.MarketReserveFunds(ctx, wallet, addr, amt) +} + +func (n *ProviderNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { + return n.MarketReleaseFunds(ctx, addr, amt) } // Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. @@ -255,107 +270,8 @@ func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, return bounds.Min, bounds.Max, nil } -func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := n.StateMarketStorageDeal(ctx, dealID, ts.Key()) - - if err != nil { - // TODO: This may be fine for some errors - return false, false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch > 0 { - cb(nil) - return true, false, nil - } - - return false, true, nil - } - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() - - if msg == nil { - log.Error("timed out waiting for deal activation... 
what now?") - return false, nil - } - - sd, err := n.StateMarketStorageDeal(ctx, dealID, ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", dealID, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", dealID, sd.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? - return nil - } - - var sectorNumber abi.SectorNumber - var sectorFound bool - - matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider { - return false, nil - } - - switch msg.Method { - case miner.Methods.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, did := range params.DealIDs { - if did == dealID { - sectorNumber = params.SectorNumber - sectorFound = true - return false, nil - } - } - - return false, nil - case miner.Methods.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - if !sectorFound { - return false, nil - } - - if params.SectorNumber != sectorNumber { - return false, nil - } - - return true, nil - default: - return false, nil - } - - } - - if err := n.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), events.NoTimeout, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil +func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, 
provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + return OnDealSectorCommitted(ctx, n, n.ev, provider, dealID, market.DealProposal(proposal), publishCid, cb) } func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { diff --git a/metrics/metrics.go b/metrics/metrics.go index 33d9e9174..9f0cad27f 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -53,6 +53,8 @@ var ( PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless) PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless) APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) + VMFlushCopyDuration = stats.Float64("vm/flush_copy_ms", "Time spent in VM Flush Copy", stats.UnitMilliseconds) + VMFlushCopyCount = stats.Int64("vm/flush_copy_count", "Number of copied objects", stats.UnitDimensionless) ) var ( @@ -146,6 +148,14 @@ var ( Aggregation: defaultMillisecondsDistribution, TagKeys: []tag.Key{APIInterface, Endpoint}, } + VMFlushCopyDurationView = &view.View{ + Measure: VMFlushCopyDuration, + Aggregation: view.Sum(), + } + VMFlushCopyCountView = &view.View{ + Measure: VMFlushCopyCount, + Aggregation: view.Sum(), + } ) // DefaultViews is an array of OpenCensus views for metric gathering purposes @@ -171,6 +181,8 @@ var DefaultViews = append([]*view.View{ PubsubSendRPCView, PubsubDropRPCView, APIRequestDurationView, + VMFlushCopyCountView, + VMFlushCopyDurationView, }, rpcmetrics.DefaultViews...) 
diff --git a/miner/miner.go b/miner/miner.go index f2468a911..cabf26ecd 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -150,6 +150,8 @@ func (m *Miner) mine(ctx context.Context) { ctx, span := trace.StartSpan(ctx, "/mine") defer span.End() + go m.doWinPoStWarmup(ctx) + var lastBase MiningBase minerLoop: for { diff --git a/miner/warmup.go b/miner/warmup.go new file mode 100644 index 000000000..991679c09 --- /dev/null +++ b/miner/warmup.go @@ -0,0 +1,84 @@ +package miner + +import ( + "context" + "crypto/rand" + "math" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + + "github.com/filecoin-project/lotus/chain/types" +) + +func (m *Miner) winPoStWarmup(ctx context.Context) error { + deadlines, err := m.api.StateMinerDeadlines(ctx, m.address, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + var sector abi.SectorNumber = math.MaxUint64 + +out: + for dlIdx := range deadlines { + partitions, err := m.api.StateMinerPartitions(ctx, m.address, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + for _, partition := range partitions { + b, err := partition.ActiveSectors.First() + if err == bitfield.ErrNoBitsSet { + continue + } + if err != nil { + return err + } + + sector = abi.SectorNumber(b) + break out + } + } + + if sector == math.MaxUint64 { + log.Info("skipping winning PoSt warmup, no sectors") + return nil + } + + log.Infow("starting winning PoSt warmup", "sector", sector) + start := time.Now() + + var r abi.PoStRandomness = make([]byte, abi.RandomnessLength) + _, _ = rand.Read(r) + + si, err := m.api.StateSectorGetInfo(ctx, m.address, sector, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting sector info: %w", err) + } + + _, err = m.epp.ComputeProof(ctx, 
[]proof2.SectorInfo{ + { + SealProof: si.SealProof, + SectorNumber: sector, + SealedCID: si.SealedCID, + }, + }, r) + if err != nil { + return xerrors.Errorf("failed to compute proof: %w", err) + } + + log.Infow("winning PoSt warmup successful", "took", time.Now().Sub(start)) + return nil +} + +func (m *Miner) doWinPoStWarmup(ctx context.Context) { + err := m.winPoStWarmup(ctx) + if err != nil { + log.Errorw("winning PoSt warmup failed", "error", err) + } +} diff --git a/node/builder.go b/node/builder.go index 05409df04..8c49e44ad 100644 --- a/node/builder.go +++ b/node/builder.go @@ -3,8 +3,12 @@ package node import ( "context" "errors" + "os" "time" + metricsi "github.com/ipfs/go-metrics-interface" + + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/store" @@ -52,7 +56,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/peermgr" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" @@ -122,11 +125,12 @@ const ( HandleIncomingBlocksKey HandleIncomingMessagesKey - + HandleMigrateClientFundsKey HandlePaymentChannelManagerKey // miner GetParamsKey + HandleMigrateProviderFundsKey HandleDealsKey HandleRetrievalKey RunSectorServiceKey @@ -136,6 +140,7 @@ const ( HeadMetricsKey SettlePaymentChannelsKey RunPeerTaggerKey + SetupFallbackBlockstoreKey SetApiEndpointKey @@ -167,7 +172,10 @@ func defaults() []Option { Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), - Override(new(helpers.MetricsCtx), context.Background), + Override(new(helpers.MetricsCtx), func() context.Context { + 
return metricsi.CtxScope(context.Background(), "lotus") + }), + Override(new(record.Validator), modules.RecordValidator), Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), Override(new(dtypes.ShutdownChan), make(chan struct{})), @@ -256,8 +264,6 @@ func Online() Option { Override(new(api.WalletAPI), From(new(wallet.MultiWallet))), Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), - Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker), - Override(new(dtypes.ChainGCBlockstore), modules.ChainGCBlockstore), Override(new(dtypes.ChainBitswap), modules.ChainBitswap), Override(new(dtypes.ChainBlockService), modules.ChainBlockService), @@ -287,14 +293,14 @@ func Online() Option { Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), - Override(new(modules.ClientDealFunds), modules.NewClientDealFunds), Override(new(storagemarket.StorageClient), modules.StorageClient), Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), Override(new(beacon.Schedule), modules.RandomSchedule), Override(new(*paychmgr.Store), paychmgr.NewStore), Override(new(*paychmgr.Manager), paychmgr.NewManager), - Override(new(*market.FundMgr), market.StartFundManager), + Override(new(*market.FundManager), market.NewFundManager), + Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), Override(HandlePaymentChannelManagerKey, paychmgr.HandleManager), Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), ), @@ -334,7 +340,7 @@ func Online() Option { Override(new(stores.SectorIndex), From(new(*stores.Index))), Override(new(dtypes.MinerID), modules.MinerID), Override(new(dtypes.MinerAddress), modules.MinerAddress), - Override(new(*ffiwrapper.Config), modules.ProofsConfig), + Override(new(abi.RegisteredSealProof), modules.SealProofType), 
Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), Override(new(*sectorstorage.Manager), modules.SectorStorage), @@ -358,9 +364,9 @@ func Online() Option { Override(new(*storedask.StoredAsk), modules.NewStorageAsk), Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds), Override(new(storagemarket.StorageProvider), modules.StorageProvider), Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)), + Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), Override(HandleRetrievalKey, modules.HandleRetrieval), Override(GetParamsKey, modules.GetParams), Override(HandleDealsKey, modules.HandleDeals), @@ -516,7 +522,13 @@ func Repo(r repo.Repo) Option { Override(new(repo.LockedRepo), modules.LockedRepo(lr)), // module handles closing Override(new(dtypes.MetadataDS), modules.Datastore), - Override(new(dtypes.ChainBlockstore), modules.ChainBlockstore), + Override(new(dtypes.ChainRawBlockstore), modules.ChainRawBlockstore), + Override(new(dtypes.ChainBlockstore), From(new(dtypes.ChainRawBlockstore))), + + If(os.Getenv("LOTUS_ENABLE_CHAINSTORE_FALLBACK") == "1", + Override(new(dtypes.ChainBlockstore), modules.FallbackChainBlockstore), + Override(SetupFallbackBlockstoreKey, modules.SetupFallbackBlockstore), + ), Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr), Override(new(dtypes.ClientMultiDstore), modules.ClientMultiDatastore), diff --git a/node/hello/hello.go b/node/hello/hello.go index 05d53de06..d4c631206 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -118,7 +118,7 @@ func (hs *Service) HandleStream(s inet.Stream) { hs.h.ConnManager().TagPeer(s.Conn().RemotePeer(), "fcpeer", 10) // don't bother informing about genesis - log.Infof("Got new tipset 
through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer()) + log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer()) hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts) } @@ -161,7 +161,7 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { _ = s.SetReadDeadline(build.Clock.Now().Add(10 * time.Second)) err := cborutil.ReadCborRPC(s, lmsg) if err != nil { - log.Infow("reading latency message", "error", err) + log.Debugw("reading latency message", "error", err) } t3 := build.Clock.Now() @@ -177,7 +177,9 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { t2 := time.Unix(0, lmsg.TSent) offset := t0.Sub(t1) + t3.Sub(t2) offset /= 2 - log.Infow("time offset", "offset", offset.Seconds(), "peerid", pid.String()) + if offset > 5*time.Second || offset < -5*time.Second { + log.Infow("time offset", "offset", offset.Seconds(), "peerid", pid.String()) + } } } }() diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 1e3374950..ef8e9e022 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -43,7 +43,6 @@ import ( "github.com/filecoin-project/go-multistore" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/api" @@ -141,11 +140,6 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err) } - rt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) - if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) - } - if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) { return nil, xerrors.New("data doesn't fit in a sector") } @@ -171,7 +165,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) EndEpoch: 
calcDealExpiration(params.MinBlocksDuration, md, dealStart), Price: params.EpochPrice, Collateral: params.ProviderCollateral, - Rt: rt, + Rt: mi.SealProofType, FastRetrieval: params.FastRetrieval, VerifiedDeal: params.VerifiedDeal, StoreID: storeID, @@ -647,7 +641,7 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { - // Hard-code the sector size to 32GiB, because: + // Hard-code the sector type to 32GiBV1_1, because: // - pieceio.GeneratePieceCommitment requires a RegisteredSealProof // - commP itself is sector-size independent, with rather low probability of that changing // ( note how the final rust call is identical for every RegSP type ) @@ -655,12 +649,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet // // IF/WHEN this changes in the future we will have to be able to calculate // "old style" commP, and thus will need to introduce a version switch or similar - arbitrarySectorSize := abi.SectorSize(32 << 30) - - rt, err := ffiwrapper.SealProofTypeFromSectorSize(arbitrarySectorSize) - if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) - } + arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1 rdr, err := os.Open(inpath) if err != nil { @@ -673,7 +662,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet return nil, err } - commP, pieceSize, err := pieceio.GeneratePieceCommitment(rt, rdr, uint64(stat.Size())) + commP, pieceSize, err := pieceio.GeneratePieceCommitment(arbitraryProofType, rdr, uint64(stat.Size())) if err != nil { return nil, xerrors.Errorf("computing commP failed: %w", err) diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index 5d21121ee..13c344599 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -192,11 +192,19 @@ func gasEstimateGasPremium(cstore *store.ChainStore, nblocksincl uint64) (types. 
return premium, nil } -func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, _ types.TipSetKey) (int64, error) { - return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn) +func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return -1, xerrors.Errorf("getting tipset: %w", err) + } + return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn, ts) } -func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, _ types.TipSetKey) (int64, error) { - return gasEstimateGasLimit(ctx, m.Chain, m.Stmgr, m.Mpool, msgIn) +func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return -1, xerrors.Errorf("getting tipset: %w", err) + } + return gasEstimateGasLimit(ctx, m.Chain, m.Stmgr, m.Mpool, msgIn, ts) } func gasEstimateGasLimit( ctx context.Context, @@ -204,13 +212,13 @@ func gasEstimateGasLimit( smgr *stmgr.StateManager, mpool *messagepool.MessagePool, msgIn *types.Message, + currTs *types.TipSet, ) (int64, error) { msg := *msgIn msg.GasLimit = build.BlockGasLimit msg.GasFeeCap = types.NewInt(uint64(build.MinimumBaseFee) + 1) msg.GasPremium = types.NewInt(1) - currTs := cstore.GetHeaviestTipSet() fromA, err := smgr.ResolveToKeyAddress(ctx, msgIn.From, currTs) if err != nil { return -1, xerrors.Errorf("getting key address: %w", err) diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 126ff0d7b..5e01eaa5b 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -123,7 +123,12 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad } func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - act, err := 
m.StateManager.LoadActorTsk(ctx, actor, tsk) + ts, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err) + } + + act, err := m.StateManager.LoadActor(ctx, actor, ts) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err) } @@ -133,7 +138,16 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err) } - return mas.Info() + // TODO: You know, this is terrible. + // I mean, we _really_ shouldn't do this. Maybe we should convert somewhere else? + info, err := mas.Info() + if err != nil { + return miner.MinerInfo{}, err + } + if m.StateManager.GetNtwkVersion(ctx, ts.Height()) >= network.Version7 && info.SealProofType < abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + info.SealProofType += abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + return info, nil } func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 05d4c9cb7..1a088fb77 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -37,13 +37,14 @@ func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) { for i := range states { ss := &states[i] out.ActiveSyncs = append(out.ActiveSyncs, api.ActiveSync{ - Base: ss.Base, - Target: ss.Target, - Stage: ss.Stage, - Height: ss.Height, - Start: ss.Start, - End: ss.End, - Message: ss.Message, + WorkerID: ss.WorkerID, + Base: ss.Base, + Target: ss.Target, + Stage: ss.Stage, + Height: ss.Height, + Start: ss.Start, + End: ss.End, + Message: ss.Message, }) } return out, nil diff --git a/node/impl/market/market.go b/node/impl/market/market.go index 26d4a9edc..9e75a4db7 100644 --- a/node/impl/market/market.go +++ b/node/impl/market/market.go @@ -14,9 +14,13 @@ import ( type MarketAPI struct { 
fx.In - FMgr *market.FundMgr + FMgr *market.FundManager } -func (a *MarketAPI) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - return a.FMgr.EnsureAvailable(ctx, addr, wallet, amt) +func (a *MarketAPI) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { + return a.FMgr.Reserve(ctx, wallet, addr, amt) +} + +func (a *MarketAPI) MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error { + return a.FMgr.Release(addr, amt) } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index a58621c97..89c4bbb8a 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -24,7 +24,6 @@ import ( "github.com/filecoin-project/go-state-types/big" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -43,7 +42,6 @@ import ( type StorageMinerAPI struct { common.CommonAPI - ProofsConfig *ffiwrapper.Config SectorBlocks *sectorblocks.SectorBlocks PieceStore dtypes.ProviderPieceStore @@ -300,6 +298,10 @@ func (sm *StorageMinerAPI) SealingSchedDiag(ctx context.Context, doSched bool) ( return sm.StorageMgr.SchedDiag(ctx, doSched) } +func (sm *StorageMinerAPI) SealingAbort(ctx context.Context, call storiface.CallID) error { + return sm.StorageMgr.Abort(ctx, call) +} + func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error { fi, err := os.Open(path) if err != nil { diff --git a/node/modules/chain.go b/node/modules/chain.go index d1414b307..095bb501c 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -38,7 +38,7 @@ import ( "github.com/filecoin-project/lotus/node/repo" 
) -func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainGCBlockstore) dtypes.ChainBitswap { +func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -76,13 +76,13 @@ func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds return mp, nil } -func ChainBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ChainBlockstore, error) { - blocks, err := r.Datastore("/chain") +func ChainRawBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ChainRawBlockstore, error) { + bs, err := r.Blockstore(repo.BlockstoreChain) if err != nil { return nil, err } - bs := blockstore.NewBlockstore(blocks) + // TODO potentially replace this cached blockstore by a CBOR cache. 
cbs, err := blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, blockstore.DefaultCacheOpts()) if err != nil { return nil, err @@ -91,21 +91,39 @@ func ChainBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo return cbs, nil } -func ChainGCBlockstore(bs dtypes.ChainBlockstore, gcl dtypes.ChainGCLocker) dtypes.ChainGCBlockstore { - return blockstore.NewGCBlockstore(bs, gcl) -} - -func ChainBlockService(bs dtypes.ChainBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { +func ChainBlockService(bs dtypes.ChainRawBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { return blockservice.New(bs, rem) } -func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { - chain := store.NewChainStore(bs, ds, syscalls, j) +func FallbackChainBlockstore(rbs dtypes.ChainRawBlockstore) dtypes.ChainBlockstore { + return &blockstore.FallbackStore{ + Blockstore: rbs, + } +} + +func SetupFallbackBlockstore(cbs dtypes.ChainBlockstore, rem dtypes.ChainBitswap) error { + fbs, ok := cbs.(*blockstore.FallbackStore) + if !ok { + return xerrors.Errorf("expected a FallbackStore") + } + + fbs.SetFallback(rem.GetBlock) + return nil +} + +func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, lbs dtypes.ChainRawBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { + chain := store.NewChainStore(bs, lbs, ds, syscalls, j) if err := chain.Load(); err != nil { log.Warnf("loading chain state from disk: %s", err) } + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return chain.Close() + }, + }) + return chain } diff --git a/node/modules/client.go b/node/modules/client.go index f1380bc97..67eaffe88 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -1,10 +1,14 @@ package modules import ( + "bytes" "context" + "os" + "path/filepath" "time" "github.com/filecoin-project/go-multistore" + 
"github.com/filecoin-project/go-state-types/abi" "golang.org/x/xerrors" "go.uber.org/fx" @@ -19,7 +23,6 @@ import ( rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" "github.com/filecoin-project/go-fil-markets/storagemarket" storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/funds" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-storedcounter" @@ -27,6 +30,7 @@ import ( "github.com/ipfs/go-datastore/namespace" "github.com/libp2p/go-libp2p-core/host" + "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/markets" @@ -40,6 +44,40 @@ import ( "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) +func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + addr, err := wallet.WalletDefaultAddress(ctx) + // nothing to be done if there is no default address + if err != nil { + return nil + } + b, err := ds.Get(datastore.NewKey("/marketfunds/client")) + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return nil + } + log.Errorf("client funds migration - getting datastore value: %w", err) + return nil + } + + var value abi.TokenAmount + if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + log.Errorf("client funds migration - unmarshalling datastore value: %w", err) + return nil + } + _, err = fundMgr.Reserve(ctx, addr, addr, value) + if err != nil { + log.Errorf("client funds migration - reserving funds (wallet %s, addr %s, funds %d): %w", + addr, addr, value, err) + return nil + } + + return 
ds.Delete(datastore.NewKey("/marketfunds/client")) + }, + }) +} + func ClientMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) { ds, err := r.Datastore("/client") if err != nil { @@ -80,13 +118,18 @@ func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.Clien // NewClientGraphsyncDataTransfer returns a data transfer manager that just // uses the clients's Client DAG service for transfers -func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS) (dtypes.ClientDataTransfer, error) { +func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) { sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter")) net := dtnet.NewFromLibp2pHost(h) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers")) transport := dtgstransport.NewTransport(h.ID(), gs) - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc) + err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec + if err != nil && !os.IsExist(err) { + return nil, err + } + + dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc) if err != nil { return nil, err } @@ -108,15 +151,9 @@ func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore { return namespace.Wrap(ds, datastore.NewKey("/deals/client")) } -type ClientDealFunds funds.DealFunds - -func NewClientDealFunds(ds dtypes.MetadataDS) (ClientDealFunds, error) { - return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/client")) -} - -func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, dealFunds ClientDealFunds, j 
journal.Journal) (storagemarket.StorageClient, error) { +func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, j journal.Journal) (storagemarket.StorageClient, error) { net := smnet.NewFromLibp2pHost(h) - c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, dealFunds, storageimpl.DealPollingInterval(time.Second)) + c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, storageimpl.DealPollingInterval(time.Second)) if err != nil { return nil, err } diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 13defda8d..05b830920 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -23,10 +23,9 @@ import ( // dy default it's namespaced under /metadata in main repo datastore type MetadataDS datastore.Batching -type ChainBlockstore blockstore.Blockstore +type ChainRawBlockstore blockstore.Blockstore +type ChainBlockstore blockstore.Blockstore // optionally bitswap backed -type ChainGCLocker blockstore.GCLocker -type ChainGCBlockstore blockstore.GCBlockstore type ChainBitswap exchange.Interface type ChainBlockService bserv.BlockService diff --git a/node/modules/services.go b/node/modules/services.go index e0a7c2eda..011b89163 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -2,6 +2,9 @@ package modules import ( "context" + "os" + "strconv" + "time" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -25,6 +28,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/sub" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/peermgr" 
marketevents "github.com/filecoin-project/lotus/markets/loggers" @@ -34,6 +38,19 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) +var pubsubMsgsSyncEpochs = 10 + +func init() { + if s := os.Getenv("LOTUS_MSGS_SYNC_EPOCHS"); s != "" { + val, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse LOTUS_MSGS_SYNC_EPOCHS: %s", err) + return + } + pubsubMsgsSyncEpochs = val + } +} + func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.Service) error { h.SetStreamHandler(hello.ProtocolID, svc.HandleStream) @@ -82,14 +99,45 @@ func RunChainExchange(h host.Host, svc exchange.Server) { h.SetStreamHandler(exchange.ChainExchangeProtocolID, svc.HandleStream) // new } +func waitForSync(stmgr *stmgr.StateManager, epochs int, subscribe func()) { + nearsync := time.Duration(epochs*int(build.BlockDelaySecs)) * time.Second + + // early check, are we synced at start up? + ts := stmgr.ChainStore().GetHeaviestTipSet() + timestamp := ts.MinTimestamp() + timestampTime := time.Unix(int64(timestamp), 0) + if build.Clock.Since(timestampTime) < nearsync { + subscribe() + return + } + + // we are not synced, subscribe to head changes and wait for sync + stmgr.ChainStore().SubscribeHeadChanges(func(rev, app []*types.TipSet) error { + if len(app) == 0 { + return nil + } + + latest := app[0].MinTimestamp() + for _, ts := range app[1:] { + timestamp := ts.MinTimestamp() + if timestamp > latest { + latest = timestamp + } + } + + latestTime := time.Unix(int64(latest), 0) + if build.Clock.Since(latestTime) < nearsync { + subscribe() + return store.ErrNotifeeDone + } + + return nil + }) +} + func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) { ctx := helpers.LifecycleCtx(mctx, lc) - blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) //nolint - if err != nil { - panic(err) - 
} - v := sub.NewBlockValidator( h.ID(), chain, stmgr, func(p peer.ID) { @@ -101,24 +149,43 @@ func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.P panic(err) } - go sub.HandleIncomingBlocks(ctx, blocksub, s, bserv, h.ConnManager()) -} + log.Infof("subscribing to pubsub topic %s", build.BlocksTopic(nn)) -func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName) { - ctx := helpers.LifecycleCtx(mctx, lc) - - msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) //nolint:staticcheck + blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) //nolint if err != nil { panic(err) } + go sub.HandleIncomingBlocks(ctx, blocksub, s, bserv, h.ConnManager()) +} + +func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, stmgr *stmgr.StateManager, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName, bootstrapper dtypes.Bootstrapper) { + ctx := helpers.LifecycleCtx(mctx, lc) + v := sub.NewMessageValidator(h.ID(), mpool) if err := ps.RegisterTopicValidator(build.MessagesTopic(nn), v.Validate); err != nil { panic(err) } - go sub.HandleIncomingMessages(ctx, mpool, msgsub) + subscribe := func() { + log.Infof("subscribing to pubsub topic %s", build.MessagesTopic(nn)) + + msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) //nolint + if err != nil { + panic(err) + } + + go sub.HandleIncomingMessages(ctx, mpool, msgsub) + } + + if bootstrapper { + subscribe() + return + } + + // wait until we are synced within 10 epochs -- env var can override + waitForSync(stmgr, pubsubMsgsSyncEpochs, subscribe) } func NewLocalDiscovery(lc fx.Lifecycle, ds dtypes.MetadataDS) (*discoveryimpl.Local, error) { diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index e9f5db008..3281142b5 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1,10 +1,13 @@ package modules import ( + "bytes" 
"context" "errors" "fmt" "net/http" + "os" + "path/filepath" "time" "go.uber.org/fx" @@ -36,7 +39,6 @@ import ( "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/funds" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" @@ -46,6 +48,7 @@ import ( "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/lotus/api" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -82,8 +85,8 @@ func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) { return address.NewFromBytes(maddrb) } -func GetParams(sbc *ffiwrapper.Config) error { - ssize, err := sbc.SealProofType.SectorSize() +func GetParams(spt abi.RegisteredSealProof) error { + ssize, err := spt.SectorSize() if err != nil { return err } @@ -94,6 +97,7 @@ func GetParams(sbc *ffiwrapper.Config) error { return nil } + // TODO: We should fetch the params for the actual proof type, not just based on the size. 
if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } @@ -118,22 +122,13 @@ func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.Network return a.StateNetworkName(ctx) } -func ProofsConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*ffiwrapper.Config, error) { +func SealProofType(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (abi.RegisteredSealProof, error) { mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK) if err != nil { - return nil, err + return 0, err } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) - if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) - } - - sb := &ffiwrapper.Config{ - SealProofType: spt, - } - - return sb, nil + return mi.SealProofType, nil } type sidsc struct { @@ -245,15 +240,59 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto }) } +func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + b, err := ds.Get(datastore.NewKey("/marketfunds/provider")) + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return nil + } + return err + } + + var value abi.TokenAmount + if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return err + } + ts, err := node.ChainHead(ctx) + if err != nil { + log.Errorf("provider funds migration - getting chain head: %w", err) + return nil + } + + mi, err := node.StateMinerInfo(ctx, address.Address(minerAddress), ts.Key()) + if err != nil { + log.Errorf("provider funds migration - getting miner info %s: %w", minerAddress, err) + return nil + } + + _, err = node.MarketReserveFunds(ctx, mi.Worker, address.Address(minerAddress), value) + if err != nil { + log.Errorf("provider funds migration - reserving funds 
(wallet %s, addr %s, funds %d): %w", + mi.Worker, minerAddress, value, err) + return nil + } + + return ds.Delete(datastore.NewKey("/marketfunds/provider")) + }, + }) +} + // NewProviderDAGServiceDataTransfer returns a data transfer manager that just // uses the provider's Staging DAG service for transfers -func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS) (dtypes.ProviderDataTransfer, error) { +func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/provider/counter")) net := dtnet.NewFromLibp2pHost(h) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) transport := dtgstransport.NewTransport(h.ID(), gs) - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc) + err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec + if err != nil && !os.IsExist(err) { + return nil, err + } + + dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc) if err != nil { return nil, err } @@ -395,12 +434,6 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat return storedAsk, nil } -type ProviderDealFunds funds.DealFunds - -func NewProviderDealFunds(ds dtypes.MetadataDS) (ProviderDealFunds, error) { - return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/provider")) -} - func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, @@ -478,7 +511,6 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside } func StorageProvider(minerAddress dtypes.MinerAddress, - ffiConfig *ffiwrapper.Config, storedAsk 
*storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, mds dtypes.StagingMultiDstore, @@ -487,7 +519,6 @@ func StorageProvider(minerAddress dtypes.MinerAddress, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, df dtypes.StorageDealFilter, - funds ProviderDealFunds, ) (storagemarket.StorageProvider, error) { net := smnet.NewFromLibp2pHost(h) store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path())) @@ -497,7 +528,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress, opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df)) - return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), ffiConfig.SealProofType, storedAsk, funds, opt) + return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), storedAsk, opt) } func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, @@ -562,13 +593,13 @@ func RetrievalProvider(h host.Host, var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") -func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { +func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { ctx := helpers.LifecycleCtx(mctx, lc) wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix)) 
- sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts) + sst, err := sectorstorage.New(ctx, ls, si, sc, urls, sa, wsts, smsts) if err != nil { return nil, err } @@ -685,9 +716,10 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error return func(cfg sealiface.Config) (err error) { err = mutateCfg(r, func(c *config.StorageMiner) { c.Sealing = config.SealingConfig{ - MaxWaitDealsSectors: cfg.MaxWaitDealsSectors, - MaxSealingSectors: cfg.MaxSealingSectors, - WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), + MaxWaitDealsSectors: cfg.MaxWaitDealsSectors, + MaxSealingSectors: cfg.MaxSealingSectors, + MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals, + WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), } }) return diff --git a/node/modules/testing/genesis.go b/node/modules/testing/genesis.go index fa9e0cff7..a3d25e36a 100644 --- a/node/modules/testing/genesis.go +++ b/node/modules/testing/genesis.go @@ -81,7 +81,7 @@ func MakeGenesis(outFile, genesisTemplate string) func(bs dtypes.ChainBlockstore fmt.Printf("GENESIS MINER ADDRESS: t0%d\n", genesis2.MinerStart) - f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return nil, err } diff --git a/node/node_test.go b/node/node_test.go index e553e83b2..b8009aa78 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -93,6 +93,22 @@ func TestDealMining(t *testing.T) { test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false) } +func TestSDRUpgrade(t *testing.T) { + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + 
test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond) +} + func TestPledgeSectors(t *testing.T) { logging.SetLogLevel("miner", "ERROR") logging.SetLogLevel("chainstore", "ERROR") diff --git a/node/repo/blockstore_opts.go b/node/repo/blockstore_opts.go new file mode 100644 index 000000000..d8d852d84 --- /dev/null +++ b/node/repo/blockstore_opts.go @@ -0,0 +1,51 @@ +package repo + +import badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" + +// BadgerBlockstoreOptions returns the badger options to apply for the provided +// domain. +func BadgerBlockstoreOptions(domain BlockstoreDomain, path string, readonly bool) (badgerbs.Options, error) { + if domain != BlockstoreChain { + return badgerbs.Options{}, ErrInvalidBlockstoreDomain + } + + opts := badgerbs.DefaultOptions(path) + + // Due to legacy usage of blockstore.Blockstore, over a datastore, all + // blocks are prefixed with this namespace. In the future, this can go away, + // in order to shorten keys, but it'll require a migration. + opts.Prefix = "/blocks/" + + // Blockstore values are immutable; therefore we do not expect any + // conflicts to emerge. + opts.DetectConflicts = false + + // This is to optimize the database on close so it can be opened + // read-only and efficiently queried. We don't do that and hanging on + // stop isn't nice. + opts.CompactL0OnClose = false + + // The alternative is "crash on start and tell the user to fix it". This + // will truncate corrupt and unsynced data, which we don't guarantee to + // persist anyways. + opts.Truncate = true + + // We mmap the index and the value logs; this is important to enable + // zero-copy value access. + opts.ValueLogLoadingMode = badgerbs.MemoryMap + opts.TableLoadingMode = badgerbs.MemoryMap + + // Embed only values < 128 bytes in the LSM tree; larger values are stored + // in value logs. + opts.ValueThreshold = 128 + + // Default table size is already 64MiB. This is here to make it explicit. 
+ opts.MaxTableSize = 64 << 20 + + // NOTE: The chain blockstore doesn't require any GC (blocks are never + // deleted). This will change if we move to a tiered blockstore. + + opts.ReadOnly = readonly + + return opts, nil +} diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index c1b6b5233..e4d9b3239 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -12,6 +12,7 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/filecoin-project/lotus/lib/blockstore" "github.com/ipfs/go-datastore" fslock "github.com/ipfs/go-fs-lock" logging "github.com/ipfs/go-log/v2" @@ -22,6 +23,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + lblockstore "github.com/filecoin-project/lotus/lib/blockstore" + badgerbs "github.com/filecoin-project/lotus/lib/blockstore/badger" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" @@ -257,6 +260,10 @@ type fsLockedRepo struct { dsErr error dsOnce sync.Once + bs blockstore.Blockstore + bsErr error + bsOnce sync.Once + storageLk sync.Mutex configLk sync.Mutex } @@ -279,11 +286,50 @@ func (fsr *fsLockedRepo) Close() error { } } + // type assertion will return ok=false if fsr.bs is nil altogether. + if c, ok := fsr.bs.(io.Closer); ok && c != nil { + if err := c.Close(); err != nil { + return xerrors.Errorf("could not close blockstore: %w", err) + } + } + err = fsr.closer.Close() fsr.closer = nil return err } +// Blockstore returns a blockstore for the provided data domain. 
+func (fsr *fsLockedRepo) Blockstore(domain BlockstoreDomain) (blockstore.Blockstore, error) { + if domain != BlockstoreChain { + return nil, ErrInvalidBlockstoreDomain + } + + fsr.bsOnce.Do(func() { + path := fsr.join(filepath.Join(fsDatastore, "chain")) + readonly := fsr.readonly + + if err := os.MkdirAll(path, 0755); err != nil { + fsr.bsErr = err + return + } + + opts, err := BadgerBlockstoreOptions(domain, path, readonly) + if err != nil { + fsr.bsErr = err + return + } + + bs, err := badgerbs.Open(opts) + if err != nil { + fsr.bsErr = err + return + } + fsr.bs = lblockstore.WrapIDStore(bs) + }) + + return fsr.bs, fsr.bsErr +} + // join joins path elements with fsr.path func (fsr *fsLockedRepo) join(paths ...string) string { return filepath.Join(append([]string{fsr.path}, paths...)...) diff --git a/node/repo/fsrepo_ds.go b/node/repo/fsrepo_ds.go index e7746cb8e..433ddb9b8 100644 --- a/node/repo/fsrepo_ds.go +++ b/node/repo/fsrepo_ds.go @@ -16,17 +16,7 @@ import ( type dsCtor func(path string, readonly bool) (datastore.Batching, error) -func ChainBadgerOptions() badger.Options { - opts := badger.DefaultOptions - opts.GcInterval = 0 // disable GC for chain datastore - - opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). - WithValueThreshold(128) - return opts -} - var fsDatastores = map[string]dsCtor{ - "chain": chainBadgerDs, "metadata": levelDs, // Those need to be fast for large writes... 
but also need a really good GC :c @@ -35,12 +25,6 @@ var fsDatastores = map[string]dsCtor{ "client": badgerDs, // client specific } -func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) { - opts := ChainBadgerOptions() - opts.ReadOnly = readonly - return badger.NewDatastore(path, &opts) -} - func badgerDs(path string, readonly bool) (datastore.Batching, error) { opts := badger.DefaultOptions opts.ReadOnly = readonly diff --git a/node/repo/interface.go b/node/repo/interface.go index c25bcb534..4ae68f880 100644 --- a/node/repo/interface.go +++ b/node/repo/interface.go @@ -3,6 +3,7 @@ package repo import ( "errors" + "github.com/filecoin-project/lotus/lib/blockstore" "github.com/ipfs/go-datastore" "github.com/multiformats/go-multiaddr" @@ -12,11 +13,26 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +// BlockstoreDomain represents the domain of a blockstore. +type BlockstoreDomain string + +const ( + // BlockstoreChain represents the blockstore domain for chain data. + // Right now, this includes chain objects (tipsets, blocks, messages), as + // well as state. In the future, they may get segregated into different + // domains. + BlockstoreChain = BlockstoreDomain("chain") +) + var ( ErrNoAPIEndpoint = errors.New("API not running (no endpoint)") ErrNoAPIToken = errors.New("API token not set") ErrRepoAlreadyLocked = errors.New("repo is already locked (lotus daemon already running)") ErrClosedRepo = errors.New("repo is no longer open") + + // ErrInvalidBlockstoreDomain is returned by LockedRepo#Blockstore() when + // an unrecognized domain is requested. + ErrInvalidBlockstoreDomain = errors.New("invalid blockstore domain") ) type Repo interface { @@ -37,6 +53,9 @@ type LockedRepo interface { // Returns datastore defined in this repo. Datastore(namespace string) (datastore.Batching, error) + // Blockstore returns an IPLD blockstore for the requested domain. 
+ Blockstore(domain BlockstoreDomain) (blockstore.Blockstore, error) + // Returns config in this repo Config() (interface{}, error) SetConfig(func(interface{})) error diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index 34e3637eb..88d4eccd3 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -14,10 +14,10 @@ import ( "github.com/multiformats/go-multiaddr" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/config" ) @@ -31,8 +31,9 @@ type MemRepo struct { repoLock chan struct{} token *byte - datastore datastore.Datastore - keystore map[string]types.KeyInfo + datastore datastore.Datastore + keystore map[string]types.KeyInfo + blockstore blockstore.Blockstore // given a repo type, produce the default config configF func(t RepoType) interface{} @@ -158,11 +159,11 @@ func NewMemory(opts *MemRepoOptions) *MemRepo { } return &MemRepo{ - repoLock: make(chan struct{}, 1), - - datastore: opts.Ds, - configF: opts.ConfigF, - keystore: opts.KeyStore, + repoLock: make(chan struct{}, 1), + blockstore: blockstore.WrapIDStore(blockstore.NewTemporarySync()), + datastore: opts.Ds, + configF: opts.ConfigF, + keystore: opts.KeyStore, } } @@ -243,6 +244,13 @@ func (lmem *lockedMemRepo) Datastore(ns string) (datastore.Batching, error) { return namespace.Wrap(lmem.mem.datastore, datastore.NewKey(ns)), nil } +func (lmem *lockedMemRepo) Blockstore(domain BlockstoreDomain) (blockstore.Blockstore, error) { + if domain != BlockstoreChain { + return nil, ErrInvalidBlockstoreDomain + } + return lmem.mem.blockstore, nil +} + func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) { return nil, nil } diff --git 
a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go index 044a8cc27..a848f62e2 100644 --- a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go +++ b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go @@ -9,13 +9,13 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" dss "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" format "github.com/ipfs/go-ipld-format" dag "github.com/ipfs/go-merkledag" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) diff --git a/node/test/builder.go b/node/test/builder.go index ea9a82220..f6599cf23 100644 --- a/node/test/builder.go +++ b/node/test/builder.go @@ -23,11 +23,12 @@ import ( "github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" @@ -38,6 +39,7 @@ import ( lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" testing2 "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" 
"github.com/filecoin-project/lotus/storage/mockstorage" @@ -50,6 +52,13 @@ import ( "github.com/stretchr/testify/require" ) +func init() { + chain.BootstrapPeerThreshold = 1 + messagepool.HeadChangeCoalesceMinDelay = time.Microsecond + messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond + messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond +} + func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd test.TestNode, mn mocknet.Mocknet, opts node.Option) test.TestStorageNode { r := repo.NewMemory(nil) @@ -346,7 +355,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes preseals = test.GenesisPreseals } - genm, k, err := mockstorage.PreSeal(2048, maddr, preseals) + genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals) if err != nil { t.Fatal(err) } @@ -403,6 +412,9 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + // so that we subscribe to pubsub topics immediately + node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)), + genesis, fullOpts[i].Opts(fulls), @@ -445,7 +457,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options( node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) { - return mock.NewMockSectorMgr(policy.GetDefaultSectorSize(), sectors), nil + return mock.NewMockSectorMgr(sectors), nil }), node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), node.Unset(new(*sectorstorage.Manager)), diff --git a/paychmgr/simple.go b/paychmgr/simple.go index afa1ae1f7..1ad58d6f5 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -36,8 +36,6 @@ type fundsReq struct { lk sync.Mutex // merge parent, if this req is 
part of a merge merge *mergedFundsReq - // whether the req's context has been cancelled - active bool } func newFundsReq(ctx context.Context, amt types.BigInt) *fundsReq { @@ -46,7 +44,6 @@ func newFundsReq(ctx context.Context, amt types.BigInt) *fundsReq { ctx: ctx, promise: promise, amt: amt, - active: true, } } @@ -61,25 +58,18 @@ func (r *fundsReq) onComplete(res *paychFundsRes) { // cancel is called when the req's context is cancelled func (r *fundsReq) cancel() { r.lk.Lock() - - r.active = false - m := r.merge - - r.lk.Unlock() + defer r.lk.Unlock() // If there's a merge parent, tell the merge parent to check if it has any // active reqs left - if m != nil { - m.checkActive() + if r.merge != nil { + r.merge.checkActive() } } // isActive indicates whether the req's context has been cancelled func (r *fundsReq) isActive() bool { - r.lk.Lock() - defer r.lk.Unlock() - - return r.active + return r.ctx.Err() == nil } // setMergeParent sets the merge that this req is part of diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 2279a9201..071ad30df 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -39,13 +39,8 @@ func NewSealingAPIAdapter(api storageMinerApi) SealingAPIAdapter { } func (s SealingAPIAdapter) StateMinerSectorSize(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (abi.SectorSize, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return 0, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) - } - // TODO: update storage-fsm to just StateMinerInfo - mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk) + mi, err := s.StateMinerInfo(ctx, maddr, tok) if err != nil { return 0, err } @@ -70,14 +65,19 @@ func (s SealingAPIAdapter) StateMinerInitialPledgeCollateral(ctx context.Context return s.delegate.StateMinerInitialPledgeCollateral(ctx, a, pci, tsk) } -func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, 
maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { +func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (miner.MinerInfo, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { - return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + return miner.MinerInfo{}, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } // TODO: update storage-fsm to just StateMinerInfo - mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk) + return s.delegate.StateMinerInfo(ctx, maddr, tsk) +} + +func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { + // TODO: update storage-fsm to just StateMinerInfo + mi, err := s.StateMinerInfo(ctx, maddr, tok) if err != nil { return address.Undef, err } diff --git a/storage/miner.go b/storage/miner.go index 378c12b84..daeb0ef20 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -214,12 +214,7 @@ func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiw return nil, xerrors.Errorf("getting sector size: %w", err) } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) - if err != nil { - return nil, err - } - - wpt, err := spt.RegisteredWinningPoStProof() + wpt, err := mi.SealProofType.RegisteredWinningPoStProof() if err != nil { return nil, err } diff --git a/storage/mockstorage/preseal.go b/storage/mockstorage/preseal.go index 0417405c8..d119c273f 100644 --- a/storage/mockstorage/preseal.go +++ b/storage/mockstorage/preseal.go @@ -13,17 +13,21 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" "github.com/filecoin-project/lotus/genesis" ) -func PreSeal(ssize abi.SectorSize, 
maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { +func PreSeal(spt abi.RegisteredSealProof, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { k, err := wallet.GenerateKey(types.KTBLS) if err != nil { return nil, nil, err } + ssize, err := spt.SectorSize() + if err != nil { + return nil, nil, err + } + genm := &genesis.Miner{ ID: maddr, Owner: k.Address, @@ -34,15 +38,10 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis Sectors: make([]*genesis.PreSeal, sectors), } - st, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - return nil, nil, err - } - for i := range genm.Sectors { preseal := &genesis.PreSeal{} - preseal.ProofType = st + preseal.ProofType = spt preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD) r := mock.CommDR(d) diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index f1da4f221..8bf2cc6ad 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -6,6 +6,7 @@ import ( "time" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -188,26 +189,29 @@ func (s *WindowPoStScheduler) runSubmitPoST( return submitErr } -func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) { +func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) { mid, err := address.IDFromAddress(s.actor) if err != nil { return bitfield.BitField{}, err } - sectors := make(map[abi.SectorID]struct{}) - var tocheck []abi.SectorID - err = check.ForEach(func(snum uint64) error { - s := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(snum), - } - - tocheck = append(tocheck, s) - sectors[s] = 
struct{}{} - return nil - }) + sectorInfos, err := s.api.StateMinerSectors(ctx, s.actor, &check, tsk) if err != nil { - return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err) + return bitfield.BitField{}, err + } + + sectors := make(map[abi.SectorNumber]struct{}) + var tocheck []storage.SectorRef + for _, info := range sectorInfos { + sectors[info.SectorNumber] = struct{}{} + tocheck = append(tocheck, storage.SectorRef{ + ProofType: info.SealProof, + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: info.SectorNumber, + }, + }) + } bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck) @@ -215,20 +219,20 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) } for _, id := range bad { - delete(sectors, id) + delete(sectors, id.Number) } log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors)) sbf := bitfield.New() for s := range sectors { - sbf.Set(uint64(s.Number)) + sbf.Set(uint64(s)) } return sbf, nil } -func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { +func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries") defer span.End() @@ -254,7 +258,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin faulty += uc - recovered, err := s.checkSectors(ctx, unrecovered) + recovered, err := s.checkSectors(ctx, unrecovered, tsk) if err != nil { return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err) } @@ -320,7 +324,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin return recoveries, sm, nil } -func (s 
*WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) { +func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) { ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults") defer span.End() @@ -335,7 +339,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err) } - good, err := s.checkSectors(ctx, nonFaulty) + good, err := s.checkSectors(ctx, nonFaulty, tsk) if err != nil { return nil, nil, xerrors.Errorf("checking sectors: %w", err) } @@ -438,7 +442,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty } ) - if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil { + if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse log.Errorf("checking sector recoveries: %v", err) } @@ -457,7 +461,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return // FORK: declaring faults after ignition upgrade makes no sense } - if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil { + if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is also potentially really bad, but we try to post anyways log.Errorf("checking sector faults: %v", err) } @@ -527,7 +531,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err) } - good, err := s.checkSectors(ctx, 
toProve) + good, err := s.checkSectors(ctx, toProve, ts.Key()) if err != nil { return nil, xerrors.Errorf("checking sectors to skip: %w", err) } diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index a76483a5f..436141295 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -116,7 +117,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si type mockFaultTracker struct { } -func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) { +func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) { // Returns "bad" sectors so just return nil meaning all sectors are good return nil, nil }