diff --git a/.circleci/config.yml b/.circleci/config.yml index acd447f69..0364e99fa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,7 +34,7 @@ commands: condition: << parameters.linux >> steps: - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev - run: git submodule sync - run: git submodule update --init download-params: @@ -188,6 +188,14 @@ jobs: command: | bash <(curl -s https://codecov.io/bash) + test-chain: + <<: *test + test-node: + <<: *test + test-storage: + <<: *test + test-cli: + <<: *test test-short: <<: *test test-window-post: @@ -428,6 +436,22 @@ workflows: - test: codecov-upload: true test-suite-name: full + - test-chain: + codecov-upload: true + test-suite-name: chain + packages: "./chain/..." + - test-node: + codecov-upload: true + test-suite-name: node + packages: "./node/..." + - test-storage: + codecov-upload: true + test-suite-name: storage + packages: "./storage/... ./extern/..." + - test-cli: + codecov-upload: true + test-suite-name: cli + packages: "./cli/... ./cmd/... ./api/..." - test-window-post: go-test-flags: "-run=TestWindowedPost" winpost-test: "1" diff --git a/.codecov.yml b/.codecov.yml index 1551f2276..a53081be7 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,5 +1,9 @@ comment: off ignore: - - "cbor_gen.go" + - "**/cbor_gen.go" + - "api/test/**/*" + - "api/test/*" + - "gen/**/*" + - "gen/*" github_checks: annotations: false diff --git a/CHANGELOG.md b/CHANGELOG.md index d397762a6..88a30c91d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,162 @@ # Lotus changelog +# 1.1.2 / 2020-10-24 + +This is a patch release of Lotus that builds on the fixes involving worker keys that were introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated. + +## Changes + +- Handle worker key changes correctly in runtime (https://github.com/filecoin-project/lotus/pull/4579) + +# 1.1.1 / 2020-10-24 + +This is a patch release of Lotus that addresses some issues caused when miners change their worker keys. Miners and node operators should update to this release as soon as possible, especially any miner who has changed their worker key recently.
+ +## Changes + +- Miner finder for interactive client deal CLI (https://github.com/filecoin-project/lotus/pull/4504) +- Disable blockstore bloom filter (https://github.com/filecoin-project/lotus/pull/4512) +- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210) +- add batch api for push messages (https://github.com/filecoin-project/lotus/pull/4236) +- add measure datastore wrapper around bench chain datastore (https://github.com/filecoin-project/lotus/pull/4302) +- Look at block base fee for PCR (https://github.com/filecoin-project/lotus/pull/4313) +- Add a shed util to determine % of power that has won a block (https://github.com/filecoin-project/lotus/pull/4318) +- Shed/borked cmd (https://github.com/filecoin-project/lotus/pull/4339) +- optimize mining code (https://github.com/filecoin-project/lotus/pull/4379) +- heaviestTipSet reurning nil is a ok (https://github.com/filecoin-project/lotus/pull/4523) +- Remove most v0 actor imports (https://github.com/filecoin-project/lotus/pull/4383) +- Small chain export optimization (https://github.com/filecoin-project/lotus/pull/4536) +- Add block list to pcr (https://github.com/filecoin-project/lotus/pull/4314) +- Fix circ supply default in conformance (https://github.com/filecoin-project/lotus/pull/4449) +- miner: fix init --create-worker-key (https://github.com/filecoin-project/lotus/pull/4475) +- make push and addLocal atomic (https://github.com/filecoin-project/lotus/pull/4500) +- add some methods that oni needs (https://github.com/filecoin-project/lotus/pull/4501) +- MinerGetBaseInfo: if miner is not found in lookback, check current (https://github.com/filecoin-project/lotus/pull/4508) +- Delete wallet from local wallet cache (https://github.com/filecoin-project/lotus/pull/4526) +- Fix lotus-shed ledger list (https://github.com/filecoin-project/lotus/pull/4521) +- Manage sectors by size instead of proof type (https://github.com/filecoin-project/lotus/pull/4511) +- Feat/api request metrics wrapper (https://github.com/filecoin-project/lotus/pull/4516) +- Fix chain sync stopping to sync (https://github.com/filecoin-project/lotus/pull/4541) +- Use the correct lookback for the worker key when creating blocks (https://github.com/filecoin-project/lotus/pull/4539) +- Cleanup test initialization and always validate VRFs in tests (https://github.com/filecoin-project/lotus/pull/4538) +- Add a market WithdrawBalance CLI (https://github.com/filecoin-project/lotus/pull/4524) +- wallet list: Add market balance and ID address flags (https://github.com/filecoin-project/lotus/pull/4555) +- tvx simulate command; tvx extract --ignore-sanity-checks (https://github.com/filecoin-project/lotus/pull/4554) +- lotus-lite: CLI tests for `lotus client` commands (https://github.com/filecoin-project/lotus/pull/4497) +- lite-mode - market storage and retrieval clients (https://github.com/filecoin-project/lotus/pull/4263) +- Chore: update drand to v1.2.0 (https://github.com/filecoin-project/lotus/pull/4420) +- Fix random test failures (https://github.com/filecoin-project/lotus/pull/4559) +- Fix flaky TestTimedBSSimple (https://github.com/filecoin-project/lotus/pull/4561) +- Make wallet market withdraw usable with miner addresses (https://github.com/filecoin-project/lotus/pull/4556) +- Fix flaky TestChainExportImportFull (https://github.com/filecoin-project/lotus/pull/4564) +- Use older randomness for the PoSt commit on specs-actors version 2 (https://github.com/filecoin-project/lotus/pull/4563) +- shed: Commad to decode messages 
(https://github.com/filecoin-project/lotus/pull/4565) +- Fetch worker key from correct block on sync (https://github.com/filecoin-project/lotus/pull/4573) + +# 1.1.0 / 2020-10-20 + +This is a mandatory release that introduces the first post-liftoff upgrade to the Filecoin network. The change that breaks consensus is an upgrade to specs-actors v2.2.0 at epoch 170000. + +## Changes + +- Introduce Network version 6 (https://github.com/filecoin-project/lotus/pull/4506) +- Update markets v1.0.0 (https://github.com/filecoin-project/lotus/pull/4505) +- Add some extra logging to try and debug sync issues (https://github.com/filecoin-project/lotus/pull/4486) +- Circle: Run tests for some subsystems separately (https://github.com/filecoin-project/lotus/pull/4496) +- Add a terminate sectors command to lotus-shed (https://github.com/filecoin-project/lotus/pull/4507) +- Add a comment to BlockMessages to address #4446 (https://github.com/filecoin-project/lotus/pull/4491) + +# 1.0.0 / 2020-10-19 + +It's 1.0.0! This is an optional release of Lotus that introduces some UX improvements to the 0.10 series. + +This very small release is largely cosmetic, and intended to flag the code that the Filecoin mainnet was launched with. + +## API changes + +- `StateMsgGasCost` has been removed. The equivalent information can be gained by calling `StateReplay`. +- A `GasCost` field has been added to the `InvocResult` type, meaning detailed gas costs will be returned when calling `StateReplay`, `StateCompute`, and `StateCall`. +- The behaviour of `StateReplay` in response to an empty tipset key has been changed. Instead of simply using the heaviest tipset (which is almost guaranteed to be an unsuccessful replay), we now search the chain for the tipset that included the message, and replay the message in that tipset (we fail if no such tipset is found). + +## Changes + +- Increase code coverage!
(https://github.com/filecoin-project/lotus/pull/4410) +- Mpool: Don't block node startup loading messages (https://github.com/filecoin-project/lotus/pull/4411) +- Improve the UX of multisig approves (https://github.com/filecoin-project/lotus/pull/4398) +- Use build.BlockDelaySecs for deal start buffer (https://github.com/filecoin-project/lotus/pull/4415) +- Conformance: support multiple protocol versions (https://github.com/filecoin-project/lotus/pull/4393) +- Ensure msig inspect cli works with lotus-lite (https://github.com/filecoin-project/lotus/pull/4421) +- Add command to (slowly) prune lotus chain datastore (https://github.com/filecoin-project/lotus/pull/3876) +- Add WalletVerify to lotus-gateway (https://github.com/filecoin-project/lotus/pull/4373) +- Improve StateMsg APIs (https://github.com/filecoin-project/lotus/pull/4429) +- Add endpoints needed by spacegap (https://github.com/filecoin-project/lotus/pull/4426) +- Make audit balances capable of printing robust addresses (https://github.com/filecoin-project/lotus/pull/4423) +- Custom filters for retrieval deals (https://github.com/filecoin-project/lotus/pull/4424) +- Fix message list api (https://github.com/filecoin-project/lotus/pull/4422) +- Replace bootstrap peers (https://github.com/filecoin-project/lotus/pull/4447) +- Don't overwrite previously-configured maxPieceSize for a persisted ask (https://github.com/filecoin-project/lotus/pull/4480) +- State: optimize state snapshot address cache (https://github.com/filecoin-project/lotus/pull/4481) + +# 0.10.2 / 2020-10-14 + +This is an optional release of Lotus that updates markets to 0.9.1, which fixes an issue affecting deals that were mid-transfer when the node was upgraded to 0.9.0. This release also includes some tweaks to default gas values and minor performance improvements. + +## Changes + +- Use updated stored ask API (https://github.com/filecoin-project/lotus/pull/4384) +- tvx: trace puts to blockstore for inclusion in CAR. (https://github.com/filecoin-project/lotus/pull/4278) +- Add propose remove (https://github.com/filecoin-project/lotus/pull/4311) +- Update to 0.9.1 bugfix release (https://github.com/filecoin-project/lotus/pull/4402) +- Update drand endpoints (https://github.com/filecoin-project/lotus/pull/4125) +- fix: return true when deadlines changed (https://github.com/filecoin-project/lotus/pull/4403) +- sync wait --watch (https://github.com/filecoin-project/lotus/pull/4396) +- reduce garbage in blockstore (https://github.com/filecoin-project/lotus/pull/4406) +- give the TimeCacheBS tests a bit more time (https://github.com/filecoin-project/lotus/pull/4407) +- Improve gas defaults (https://github.com/filecoin-project/lotus/pull/4408) +- Change default gas premium to for 10 block inclusion (https://github.com/filecoin-project/lotus/pull/4222) + +# 0.10.1 / 2020-10-14 + +This is an optional release of Lotus that updates markets to 0.9.0, which adds the ability to restart data transfers. This release also introduces Ledger support, and various UX improvements. 
+ +## Changes + +- Test the tape upgrade (https://github.com/filecoin-project/lotus/pull/4328) +- Adding in Ledger support (https://github.com/filecoin-project/lotus/pull/4290) +- Improve the UX for lotus-miner sealing workers (https://github.com/filecoin-project/lotus/pull/4329) +- Add a CLI tool for miner's to repay debt (https://github.com/filecoin-project/lotus/pull/4319) +- Rename params_testnet to params_mainnet (https://github.com/filecoin-project/lotus/pull/4336) +- Use seal-duration in calculating the earliest StartEpoch (https://github.com/filecoin-project/lotus/pull/4337) +- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173) +- Add an API endpoint to calculate the exact circulating supply (https://github.com/filecoin-project/lotus/pull/4148) +- lotus-pcr: ignore all other market messages (https://github.com/filecoin-project/lotus/pull/4341) +- Add message CID to InvocResult (https://github.com/filecoin-project/lotus/pull/4382) +- types: Add CID fields to messages in json marshalers (https://github.com/filecoin-project/lotus/pull/4338) +- fix(sync state): set state height to actual tipset height (https://github.com/filecoin-project/lotus/pull/4347) +- Fix off by one tipset in searchBackForMsg (https://github.com/filecoin-project/lotus/pull/4367) +- fix a panic on startup when we fail to load the tipset (https://github.com/filecoin-project/lotus/pull/4376) +- Avoid having the same message CID show up in execution traces (https://github.com/filecoin-project/lotus/pull/4350) +- feat(markets): update markets 0.9.0 and add data transfer restart (https://github.com/filecoin-project/lotus/pull/4363) + +# 0.10.0 / 2020-10-12 + +This is a consensus-breaking hotfix that addresses an issue in specs-actors v2.0.3 that made it impossible to pledge new 32GiB sectors. The change in Lotus is to update to actors v2.1.0, behind the new network version 5. + +## Changes + +- make pledge test pass with the race detector (https://github.com/filecoin-project/lotus/pull/4291) +- fix a race in tipset cache usage (https://github.com/filecoin-project/lotus/pull/4282) +- add an api for removing multisig signers (https://github.com/filecoin-project/lotus/pull/4274) +- cli: Don't output errors to stdout (https://github.com/filecoin-project/lotus/pull/4298) +- Fix panic in wallet export when key is not found (https://github.com/filecoin-project/lotus/pull/4299) +- Dump the block validation cache whenever we perform an import (https://github.com/filecoin-project/lotus/pull/4287) +- Fix two races (https://github.com/filecoin-project/lotus/pull/4301) +- sync unmark-bad --all (https://github.com/filecoin-project/lotus/pull/4296) +- decode parameters for multisig transactions in inspect (https://github.com/filecoin-project/lotus/pull/4312) +- Chain is love (https://github.com/filecoin-project/lotus/pull/4321) +- lotus-stats: optmize getting miner power (https://github.com/filecoin-project/lotus/pull/4315) +- implement tape upgrade (https://github.com/filecoin-project/lotus/pull/4322) + # 0.9.1 / 2020-10-10 This release fixes an issue which may cause the actors v2 migration to compute the state incorrectly when more than one migration is running in parallel. 
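The 1.0.0 notes above remove `StateMsgGasCost`, add a `GasCost` field to `InvocResult`, and change `StateReplay` to locate the tipset that included the message when given an empty tipset key. Below is a minimal migration sketch, assuming a Go caller that already holds an `api.FullNode` handle; `totalGasCost` is an illustrative helper, not part of the Lotus API.

```go
package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// totalGasCost replaces a former StateMsgGasCost call: replay the message and
// read the GasCost field that InvocResult now carries (detailed gas costs are
// also returned by StateCompute and StateCall).
func totalGasCost(ctx context.Context, node api.FullNode, msg cid.Cid) (abi.TokenAmount, error) {
	// An empty TipSetKey makes StateReplay search the chain for the tipset
	// that included the message, per the 1.0.0 behaviour change.
	res, err := node.StateReplay(ctx, types.EmptyTSK, msg)
	if err != nil {
		return abi.NewTokenAmount(0), err
	}
	return res.GasCost.TotalCost, nil
}
```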
diff --git a/Makefile b/Makefile index 79f7fa81e..093f62ef6 100644 --- a/Makefile +++ b/Makefile @@ -133,18 +133,30 @@ benchmarks: lotus-pond: 2k go build -o lotus-pond ./lotuspond - (cd lotuspond/front && npm i && CI=false npm run build) .PHONY: lotus-pond BINS+=lotus-pond +lotus-pond-front: + (cd lotuspond/front && npm i && CI=false npm run build) +.PHONY: lotus-pond-front + +lotus-pond-app: lotus-pond-front lotus-pond +.PHONY: lotus-pond-app + lotus-townhall: rm -f lotus-townhall go build -o lotus-townhall ./cmd/lotus-townhall - (cd ./cmd/lotus-townhall/townhall && npm i && npm run build) - go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build .PHONY: lotus-townhall BINS+=lotus-townhall +lotus-townhall-front: + (cd ./cmd/lotus-townhall/townhall && npm i && npm run build) +.PHONY: lotus-townhall-front + +lotus-townhall-app: lotus-touch lotus-townhall-front + go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build +.PHONY: lotus-townhall-app + lotus-fountain: rm -f lotus-fountain go build -o lotus-fountain ./cmd/lotus-fountain diff --git a/SECURITY.md b/SECURITY.md index ecb600deb..592206bc5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,7 @@ ## Reporting a Vulnerability -For *critical* bugs, please send an email to security@filecoin.org. - -The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about it. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). +For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. @@ -20,10 +18,6 @@ Here are some examples of bugs we would consider 'critical': This is not an exhaustive list, but should provide some idea of what we consider 'critical'. -## Supported Versions +## Reporting a non security bug -* TODO: This should be defined and set up by Mainnet launch. - -| Version | Supported | -| ------- | ------------------ | -| Testnet | :white_check_mark: | +For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). 
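The `api/api_full.go` hunk below adds batch mempool methods (`MpoolBatchPush`, `MpoolBatchPushUntrusted`, `MpoolBatchPushMessage`), matching the "add batch api for push messages" changelog entry above. A minimal usage sketch follows, again assuming a caller holding an `api.FullNode` handle; `sendAll` and the nil send spec are illustrative choices rather than prescribed usage.

```go
package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// sendAll signs and pushes a batch of unsigned messages in one call instead of
// looping over MpoolPushMessage; as with the single-message variant, unset gas
// fields are presumably estimated from current chain conditions.
func sendAll(ctx context.Context, node api.FullNode, msgs []*types.Message) ([]cid.Cid, error) {
	// nil *api.MessageSendSpec requests default send behaviour (assumption,
	// mirroring how MpoolPushMessage treats a nil spec).
	signed, err := node.MpoolBatchPushMessage(ctx, msgs, nil)
	if err != nil {
		return nil, err
	}
	cids := make([]cid.Cid, 0, len(signed))
	for _, sm := range signed {
		cids = append(cids, sm.Cid())
	}
	return cids, nil
}
```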
diff --git a/api/api_common.go b/api/api_common.go index f8fcbe8c5..5b036d1f6 100644 --- a/api/api_common.go +++ b/api/api_common.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/google/uuid" + "github.com/filecoin-project/go-jsonrpc/auth" metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" @@ -58,6 +60,9 @@ type Common interface { // trigger graceful shutdown Shutdown(context.Context) error + // Session returns a random UUID of api provider session + Session(context.Context) (uuid.UUID, error) + Closing(context.Context) (<-chan struct{}, error) } diff --git a/api/api_full.go b/api/api_full.go index a2fe94ee9..0b9fb71d2 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" @@ -206,6 +207,15 @@ type FullNode interface { // based on current chain conditions MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) + // MpoolBatchPush batch pushes a signed message to mempool. + MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) + + // MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. + MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) + + // MpoolBatchPushMessage batch pushes a unsigned message to mempool. + MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) + // MpoolGetNonce gets next nonce for the specified sender. // Note that this method may not be atomic. Use MpoolPushMessage instead. MpoolGetNonce(context.Context, address.Address) (uint64, error) @@ -229,7 +239,9 @@ type FullNode interface { // MethodGroup: Wallet // WalletNew creates a new address in the wallet with the given sigType. - WalletNew(context.Context, crypto.SigType) (address.Address, error) + // Available key types: bls, secp256k1, secp256k1-ledger + // Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated + WalletNew(context.Context, types.KeyType) (address.Address, error) // WalletHas indicates whether the given address is in the wallet. WalletHas(context.Context, address.Address) (bool, error) // WalletList lists all the addresses in the wallet. @@ -274,6 +286,8 @@ type FullNode interface { ClientListDeals(ctx context.Context) ([]DealInfo, error) // ClientGetDealUpdates returns the status of updated deals ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) + // ClientGetDealStatus returns status given a code + ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) // ClientHasLocal indicates whether a certain CID is locally stored. ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). @@ -287,6 +301,8 @@ type FullNode interface { ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) // ClientQueryAsk returns a signed StorageAsk from the specified miner. 
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) + // ClientCalcCommP calculates the CommP and data size of the specified CID + ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) // ClientCalcCommP calculates the CommP for a specified file ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) // ClientGenCar generates a CAR file for the specified file. @@ -296,6 +312,10 @@ type FullNode interface { // ClientListTransfers returns the status of all ongoing transfers of data ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) + // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error + // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel // which are stuck due to insufficient funds ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error @@ -310,19 +330,22 @@ type FullNode interface { // MethodGroup: State // The State methods are used to query, inspect, and interact with chain state. - // All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. + // Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. // StateCall runs the given message and returns its result without any persisted changes. StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) - // StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. + // StateReplay replays a given message, assuming it was included in a block in the specified tipset. + // If no tipset key is provided, the appropriate tipset is looked up. StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) // StateGetActor returns the indicated actor's nonce and balance. StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // StateReadState returns the indicated actor's state. StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. - StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) + StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) + // StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. 
+ StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) // StateNetworkName returns the name of the network the node is synced to StateNetworkName(context.Context) (dtypes.NetworkName, error) @@ -353,6 +376,8 @@ type FullNode interface { StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + // StateMinerSectorAllocated checks if a sector is allocated + StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found @@ -365,8 +390,6 @@ type FullNode interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) - // StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip - StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error) // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the // message arrives on chain, and gets to the indicated confidence depth. StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error) @@ -414,8 +437,12 @@ type FullNode interface { // can issue. It takes the deal size and verified status as parameters. StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) - // StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset - StateCirculatingSupply(context.Context, types.TipSetKey) (CirculatingSupply, error) + // StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. + // This is not used anywhere in the protocol itself, and is only for external consumption. + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) + // StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. + // This is the value reported by the runtime interface to actors code. 
+ StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) // StateNetworkVersion returns the network version at the given tipset StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) @@ -583,6 +610,8 @@ type MsgGasCost struct { TotalCost abi.TokenAmount } +// BlsMessages[x].cid = Cids[x] +// SecpkMessages[y].cid = Cids[BlsMessages.length + y] type BlockMessages struct { BlsMessages []*types.Message SecpkMessages []*types.SignedMessage @@ -728,8 +757,10 @@ type RetrievalOrder struct { } type InvocResult struct { + MsgCid cid.Cid Msg *types.Message MsgRct *types.MessageReceipt + GasCost MsgGasCost ExecutionTrace types.ExecutionTrace Error string Duration time.Duration @@ -864,6 +895,12 @@ type DataSize struct { PieceSize abi.PaddedPieceSize } +type DataCIDSize struct { + PayloadSize int64 + PieceSize abi.PaddedPieceSize + PieceCID cid.Cid +} + type CommPRet struct { Root cid.Cid Size abi.UnpaddedPieceSize @@ -908,3 +945,8 @@ type MsigVesting struct { StartEpoch abi.ChainEpoch UnlockDuration abi.ChainEpoch } + +type MessageMatch struct { + To address.Address + From address.Address +} diff --git a/api/api_gateway.go b/api/api_gateway.go index 95d28887d..07fb5deb3 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -3,22 +3,42 @@ package api import ( "context" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" ) type GatewayAPI interface { + ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(ctx context.Context) (*types.TipSet, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainNotify(context.Context) (<-chan []*HeadChange, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, 
tsk types.TipSetKey) (*MarketDeal, error) + StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) + StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error) } diff --git a/api/api_storage.go b/api/api_storage.go index 529224f6e..d003ec776 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -5,7 +5,10 @@ import ( "context" "time" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/piecestore" @@ -62,11 +65,12 @@ type StorageMiner interface { // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error - WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error) - WorkerJobs(context.Context) (map[uint64][]storiface.WorkerJob, error) + WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) + WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) + storiface.WorkerReturn // SealingSchedDiag dumps internal sealing scheduler state - SealingSchedDiag(context.Context) (interface{}, error) + SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) stores.SectorIndex @@ -81,6 +85,10 @@ type StorageMiner interface { MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) + // MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error + // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error DealsList(ctx context.Context) ([]MarketDeal, error) diff --git a/api/api_wallet.go b/api/api_wallet.go index 1213ffc1d..88ad8f43a 100644 --- a/api/api_wallet.go +++ b/api/api_wallet.go @@ -35,7 +35,7 @@ type MsgMeta struct { } type WalletAPI interface { - WalletNew(context.Context, crypto.SigType) (address.Address, error) + WalletNew(context.Context, types.KeyType) (address.Address, error) WalletHas(context.Context, address.Address) (bool, error) WalletList(context.Context) ([]address.Address, error) diff --git a/api/api_worker.go b/api/api_worker.go index ac1446fdd..805b23bc1 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -2,15 +2,13 @@ package api import ( "context" - "io" - "github.com/ipfs/go-cid" + "github.com/google/uuid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" - "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/build" ) @@ -23,18 +21,26 @@ type WorkerAPI interface { Paths(context.Context) ([]stores.StoragePath, error) Info(context.Context) (storiface.WorkerInfo, error) - AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) + storiface.WorkerCalls - storage.Sealer - - MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error - - UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) + // Storage / Other + Remove(ctx context.Context, sector abi.SectorID) error StorageAddLocal(ctx context.Context, path string) error - Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error + // SetEnabled marks the worker as enabled/disabled. Not that this setting + // may take a few seconds to propagate to task scheduler + SetEnabled(ctx context.Context, enabled bool) error - Closing(context.Context) (<-chan struct{}, error) + Enabled(ctx context.Context) (bool, error) + + // WaitQuiet blocks until there are no tasks running + WaitQuiet(ctx context.Context) error + + // returns a random UUID of worker session, generated randomly when worker + // process starts + ProcessSession(context.Context) (uuid.UUID, error) + + // Like ProcessSession, but returns an error when worker is disabled + Session(context.Context) (uuid.UUID, error) } diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index 22d50e726..244d309a0 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -5,8 +5,7 @@ import ( "io" "time" - stnetwork "github.com/filecoin-project/go-state-types/network" - + "github.com/google/uuid" "github.com/ipfs/go-cid" metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" @@ -15,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" + stnetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -67,6 +68,7 @@ type CommonStruct struct { LogSetLevel func(context.Context, string, string) error `perm:"write"` Shutdown func(context.Context) error `perm:"admin"` + Session func(context.Context) (uuid.UUID, error) `perm:"read"` Closing func(context.Context) (<-chan struct{}, error) `perm:"read"` } } @@ -130,10 +132,14 @@ type FullNodeStruct struct { MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` + MpoolBatchPush func(ctx context.Context, smsgs 
[]*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolBatchPushUntrusted func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolBatchPushMessage func(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` + MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` - WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` + WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"` WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` WalletList func(context.Context) ([]address.Address, error) `perm:"write"` WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"` @@ -155,16 +161,20 @@ type FullNodeStruct struct { ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"` ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` + ClientGetDealStatus func(context.Context, uint64) (string, error) `perm:"read"` ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"` ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"` ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` + ClientDealPieceCID func(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) `perm:"read"` ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"` ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"` ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"` ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"` ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` + ClientRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` + ClientCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"` StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"` @@ -181,6 +191,7 @@ type FullNodeStruct struct { StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` + 
StateMinerSectorAllocated func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) `perm:"read"` StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` @@ -189,7 +200,6 @@ type FullNodeStruct struct { StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"` StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"` StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"` - StateMsgGasCost func(context.Context, cid.Cid, types.TipSetKey) (*api.MsgGasCost, error) `perm:"read"` StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` @@ -204,13 +214,15 @@ type FullNodeStruct struct { StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"` - StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` + StateListMessages func(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` + StateDecodeParams func(context.Context, address.Address, abi.MethodNum, []byte, types.TipSetKey) (interface{}, error) `perm:"read"` StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` StateVerifierStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"` StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"` - StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"` + StateCirculatingSupply func(context.Context, types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` + StateVMCirculatingSupplyInternal func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"` StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"` MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -277,6 +289,8 @@ type StorageMinerStruct struct { MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"` MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, 
error) `perm:"write"` MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` + MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"` + MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"` PledgeSector func(context.Context) error `perm:"write"` @@ -292,24 +306,36 @@ type StorageMinerStruct struct { SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"` SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"` - WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm - WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"` - WorkerJobs func(context.Context) (map[uint64][]storiface.WorkerJob, error) `perm:"admin"` + WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm + WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"` + WorkerJobs func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"` - SealingSchedDiag func(context.Context) (interface{}, error) `perm:"admin"` + ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"` + ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"` + ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"` + ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"` + ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"` + ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` + ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` + ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` + ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` + ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"` + ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` - StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` - StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"` - StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"` - StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"` - StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"` - StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, abi.RegisteredSealProof, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` - StorageInfo func(context.Context, stores.ID) 
(stores.StorageInfo, error) `perm:"admin"` - StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, sealing stores.PathType) ([]stores.StorageInfo, error) `perm:"admin"` - StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"` - StorageLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"` - StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"` + SealingSchedDiag func(context.Context, bool) (interface{}, error) `perm:"admin"` + + StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` + StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` + StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"` + StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"` + StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType, bool) error `perm:"admin"` + StorageDropSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType) error `perm:"admin"` + StorageFindSector func(context.Context, abi.SectorID, storiface.SectorFileType, abi.SectorSize, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"` + StorageBestAlloc func(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, sealing storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"` + StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"` + StorageLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error `perm:"admin"` + StorageTryLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) `perm:"admin"` DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"` DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"` @@ -345,46 +371,65 @@ type WorkerStruct struct { Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` - AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) `perm:"admin"` - SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"` - SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"` - SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"` - SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"` - FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"` - ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"` - Remove func(ctx context.Context, sector abi.SectorID) 
error `perm:"admin"` - MoveStorage func(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error `perm:"admin"` - StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` + AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"` + SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"` + SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"` + SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` + FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"` + ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"` + MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` + UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"` + ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"` + Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"` - UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"` - ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) `perm:"admin"` + Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` + StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` - Fetch func(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error `perm:"admin"` + SetEnabled func(ctx context.Context, enabled bool) error `perm:"admin"` + Enabled func(ctx context.Context) (bool, error) `perm:"admin"` - Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"` + WaitQuiet func(ctx context.Context) error `perm:"admin"` + + ProcessSession func(context.Context) (uuid.UUID, error) `perm:"admin"` + Session func(context.Context) (uuid.UUID, error) `perm:"admin"` } } type GatewayStruct struct { Internal struct { - // TODO: does the gateway need perms? 
- ChainGetTipSet func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) - ChainGetTipSetByHeight func(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) - ChainHead func(ctx context.Context) (*types.TipSet, error) - GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - MpoolPush func(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) - MsigGetAvailableBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) - MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) - StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateGetActor func(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) - StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) + ChainGetBlockMessages func(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) + ChainGetMessage func(ctx context.Context, mc cid.Cid) (*types.Message, error) + ChainGetTipSet func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight func(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainHasObj func(context.Context, cid.Cid) (bool, error) + ChainHead func(ctx context.Context) (*types.TipSet, error) + ChainNotify func(ctx context.Context) (<-chan []*api.HeadChange, error) + ChainReadObj func(context.Context, cid.Cid) ([]byte, error) + GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) + MpoolPush func(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) + MsigGetAvailableBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) + MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) + StateGetActor func(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateGetReceipt func(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) + StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateListMiners func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateMinerInfo func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) + StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) + StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateNetworkVersion func(ctx context.Context, tsk 
types.TipSetKey) (stnetwork.Version, error) + StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) } } type WalletStruct struct { Internal struct { - WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` + WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"` WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` WalletList func(context.Context) ([]address.Address, error) `perm:"write"` WalletSign func(context.Context, address.Address, []byte, api.MsgMeta) (*crypto.Signature, error) `perm:"sign"` @@ -474,6 +519,10 @@ func (c *CommonStruct) Shutdown(ctx context.Context) error { return c.Internal.Shutdown(ctx) } +func (c *CommonStruct) Session(ctx context.Context) (uuid.UUID, error) { + return c.Internal.Session(ctx) +} + func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) { return c.Internal.Closing(ctx) } @@ -512,6 +561,10 @@ func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (* return c.Internal.ClientGetDealInfo(ctx, deal) } +func (c *FullNodeStruct) ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) { + return c.Internal.ClientGetDealStatus(ctx, statusCode) +} + func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { return c.Internal.ClientListDeals(ctx) } @@ -531,6 +584,11 @@ func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) { return c.Internal.ClientQueryAsk(ctx, p, miner) } + +func (c *FullNodeStruct) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) { + return c.Internal.ClientDealPieceCID(ctx, root) +} + func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { return c.Internal.ClientCalcCommP(ctx, inpath) } @@ -551,6 +609,14 @@ func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan return c.Internal.ClientDataTransferUpdates(ctx) } +func (c *FullNodeStruct) ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + return c.Internal.ClientRestartDataTransfer(ctx, transferID, otherPeer, isInitiator) +} + +func (c *FullNodeStruct) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + return c.Internal.ClientCancelDataTransfer(ctx, transferID, otherPeer, isInitiator) +} + func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel) } @@ -603,6 +669,18 @@ func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Messag return c.Internal.MpoolPushMessage(ctx, msg, spec) } +func (c *FullNodeStruct) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + return c.Internal.MpoolBatchPush(ctx, smsgs) +} + +func (c *FullNodeStruct) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + return c.Internal.MpoolBatchPushUntrusted(ctx, smsgs) +} + +func (c *FullNodeStruct) MpoolBatchPushMessage(ctx 
context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) { + return c.Internal.MpoolBatchPushMessage(ctx, msgs, spec) +} + func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { return c.Internal.MpoolSub(ctx) } @@ -631,7 +709,7 @@ func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.Chain return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk) } -func (c *FullNodeStruct) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { +func (c *FullNodeStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { return c.Internal.WalletNew(ctx, typ) } @@ -851,6 +929,10 @@ func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr a return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk) } +func (c *FullNodeStruct) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { + return c.Internal.StateMinerSectorAllocated(ctx, maddr, s, tsk) +} + func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk) } @@ -883,10 +965,6 @@ func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Addres return c.Internal.StateReadState(ctx, addr, tsk) } -func (c *FullNodeStruct) StateMsgGasCost(ctx context.Context, msgc cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) { - return c.Internal.StateMsgGasCost(ctx, msgc, tsk) -} - func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) { return c.Internal.StateWaitMsg(ctx, msgc, confidence) } @@ -939,10 +1017,14 @@ func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk t return c.Internal.StateGetReceipt(ctx, msg, tsk) } -func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) { +func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) { return c.Internal.StateListMessages(ctx, match, tsk, toht) } +func (c *FullNodeStruct) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) { + return c.Internal.StateDecodeParams(ctx, toAddr, method, params, tsk) +} + func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { return c.Internal.StateCompute(ctx, height, msgs, tsk) } @@ -963,10 +1045,14 @@ func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) } -func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { +func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { return c.Internal.StateCirculatingSupply(ctx, tsk) } +func (c *FullNodeStruct) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + return c.Internal.StateVMCirculatingSupplyInternal(ctx, tsk) +} + func (c *FullNodeStruct) StateNetworkVersion(ctx 
context.Context, tsk types.TipSetKey) (stnetwork.Version, error) { return c.Internal.StateNetworkVersion(ctx, tsk) } @@ -1171,32 +1257,76 @@ func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) erro return c.Internal.WorkerConnect(ctx, url) } -func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]storiface.WorkerStats, error) { +func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { return c.Internal.WorkerStats(ctx) } -func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uint64][]storiface.WorkerJob, error) { +func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { return c.Internal.WorkerJobs(ctx) } -func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context) (interface{}, error) { - return c.Internal.SealingSchedDiag(ctx) +func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { + return c.Internal.ReturnAddPiece(ctx, callID, pi, err) +} + +func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { + return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err) +} + +func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { + return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err) +} + +func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { + return c.Internal.ReturnSealCommit1(ctx, callID, out, err) +} + +func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { + return c.Internal.ReturnSealCommit2(ctx, callID, proof, err) +} + +func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { + return c.Internal.ReturnFinalizeSector(ctx, callID, err) +} + +func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { + return c.Internal.ReturnReleaseUnsealed(ctx, callID, err) +} + +func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { + return c.Internal.ReturnMoveStorage(ctx, callID, err) +} + +func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { + return c.Internal.ReturnUnsealPiece(ctx, callID, err) +} + +func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { + return c.Internal.ReturnReadPiece(ctx, callID, ok, err) +} + +func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { + return c.Internal.ReturnFetch(ctx, callID, err) +} + +func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) { + return c.Internal.SealingSchedDiag(ctx, doSched) } func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error { return c.Internal.StorageAttach(ctx, si, st) } -func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType, primary bool) error { +func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId 
stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary) } -func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error { +func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { return c.Internal.StorageDropSector(ctx, storageId, s, ft) } -func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]stores.SectorStorageInfo, error) { - return c.Internal.StorageFindSector(ctx, si, types, spt, allowFetch) +func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { + return c.Internal.StorageFindSector(ctx, si, types, ssize, allowFetch) } func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { @@ -1215,19 +1345,19 @@ func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (sto return c.Internal.StorageInfo(ctx, id) } -func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, pt stores.PathType) ([]stores.StorageInfo, error) { - return c.Internal.StorageBestAlloc(ctx, allocate, spt, pt) +func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pt storiface.PathType) ([]stores.StorageInfo, error) { + return c.Internal.StorageBestAlloc(ctx, allocate, ssize, pt) } func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error { return c.Internal.StorageReportHealth(ctx, id, report) } -func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error { +func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error { return c.Internal.StorageLock(ctx, sector, read, write) } -func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) { +func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { return c.Internal.StorageTryLock(ctx, sector, read, write) } @@ -1275,6 +1405,14 @@ func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-c return c.Internal.MarketDataTransferUpdates(ctx) } +func (c *StorageMinerStruct) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + return c.Internal.MarketRestartDataTransfer(ctx, transferID, otherPeer, isInitiator) +} + +func (c *StorageMinerStruct) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + return c.Internal.MarketCancelDataTransfer(ctx, transferID, otherPeer, isInitiator) +} + func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { return c.Internal.DealsImportData(ctx, dealPropCid, file) } @@ -1365,64 +1503,84 @@ func (w 
*WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) { return w.Internal.Info(ctx) } -func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { +func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) } -func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { +func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) } -func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) { - return w.Internal.SealPreCommit2(ctx, sector, p1o) +func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { + return w.Internal.SealPreCommit2(ctx, sector, pc1o) } -func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids) } -func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { +func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { return w.Internal.SealCommit2(ctx, sector, c1o) } -func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { return w.Internal.FinalizeSector(ctx, sector, keepUnsealed) } -func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree) } +func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { + return w.Internal.MoveStorage(ctx, sector, types) +} + +func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) { + return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c) +} + +func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { + return w.Internal.ReadPiece(ctx, sink, sector, offset, size) +} + +func (w 
*WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return w.Internal.Fetch(ctx, id, fileType, ptype, am) +} + func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error { return w.Internal.Remove(ctx, sector) } -func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error { - return w.Internal.MoveStorage(ctx, sector, types) -} - func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error { return w.Internal.StorageAddLocal(ctx, path) } -func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - return w.Internal.UnsealPiece(ctx, id, index, size, randomness, c) +func (w *WorkerStruct) SetEnabled(ctx context.Context, enabled bool) error { + return w.Internal.SetEnabled(ctx, enabled) } -func (w *WorkerStruct) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { - return w.Internal.ReadPiece(ctx, writer, id, index, size) +func (w *WorkerStruct) Enabled(ctx context.Context) (bool, error) { + return w.Internal.Enabled(ctx) } -func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { - return w.Internal.Fetch(ctx, id, fileType, ptype, am) +func (w *WorkerStruct) WaitQuiet(ctx context.Context) error { + return w.Internal.WaitQuiet(ctx) } -func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) { - return w.Internal.Closing(ctx) +func (w *WorkerStruct) ProcessSession(ctx context.Context) (uuid.UUID, error) { + return w.Internal.ProcessSession(ctx) } -func (g GatewayStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { - return g.Internal.ChainHead(ctx) +func (w *WorkerStruct) Session(ctx context.Context) (uuid.UUID, error) { + return w.Internal.Session(ctx) +} + +func (g GatewayStruct) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { + return g.Internal.ChainGetBlockMessages(ctx, c) +} + +func (g GatewayStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + return g.Internal.ChainGetMessage(ctx, mc) } func (g GatewayStruct) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { @@ -1433,6 +1591,22 @@ func (g GatewayStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEp return g.Internal.ChainGetTipSetByHeight(ctx, h, tsk) } +func (g GatewayStruct) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return g.Internal.ChainHasObj(ctx, c) +} + +func (g GatewayStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { + return g.Internal.ChainHead(ctx) +} + +func (g GatewayStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { + return g.Internal.ChainNotify(ctx) +} + +func (g GatewayStruct) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + return g.Internal.ChainReadObj(ctx, c) +} + func (g GatewayStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { return g.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk) } @@ -1453,19 +1627,59 @@ func (g GatewayStruct) StateAccountKey(ctx context.Context, addr 
address.Address return g.Internal.StateAccountKey(ctx, addr, tsk) } +func (g GatewayStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + return g.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) +} + func (g GatewayStruct) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) { return g.Internal.StateGetActor(ctx, actor, ts) } +func (g GatewayStruct) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { + return g.Internal.StateGetReceipt(ctx, c, tsk) +} + func (g GatewayStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { return g.Internal.StateLookupID(ctx, addr, tsk) } +func (g GatewayStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + return g.Internal.StateListMiners(ctx, tsk) +} + +func (g GatewayStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + return g.Internal.StateMarketBalance(ctx, addr, tsk) +} + +func (g GatewayStruct) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + return g.Internal.StateMarketStorageDeal(ctx, dealId, tsk) +} + +func (g GatewayStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + return g.Internal.StateMinerInfo(ctx, actor, tsk) +} + +func (g GatewayStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { + return g.Internal.StateMinerProvingDeadline(ctx, addr, tsk) +} + +func (g GatewayStruct) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + return g.Internal.StateMinerPower(ctx, addr, tsk) +} + +func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) { + return g.Internal.StateNetworkVersion(ctx, tsk) +} + +func (g GatewayStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + return g.Internal.StateVerifiedClientStatus(ctx, addr, tsk) +} + func (g GatewayStruct) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { return g.Internal.StateWaitMsg(ctx, msg, confidence) } -func (c *WalletStruct) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { +func (c *WalletStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { return c.Internal.WalletNew(ctx, typ) } diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index ced536cc3..dc6004121 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -87,6 +87,7 @@ func init() { addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) addExample(abi.ChainEpoch(10101)) addExample(crypto.SigTypeBLS) + addExample(types.KTBLS) addExample(int64(9)) addExample(12.3) addExample(123) diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go index 4a860c661..75f72d861 100644 --- a/api/test/ccupgrade.go +++ b/api/test/ccupgrade.go @@ -3,7 +3,6 @@ package test import ( "context" "fmt" - "os" "sync/atomic" "testing" "time" @@ -17,8 +16,6 @@ import ( ) func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", 
"1") - for _, height := range []abi.ChainEpoch{ 1, // before 162, // while sealing @@ -92,7 +89,7 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH t.Fatal(err) } - makeDeal(t, ctx, 6, client, miner, false, false) + MakeDeal(t, ctx, 6, client, miner, false, false) // Validate upgrade diff --git a/api/test/deals.go b/api/test/deals.go index 8b4a7fe8b..b81099d90 100644 --- a/api/test/deals.go +++ b/api/test/deals.go @@ -16,15 +16,12 @@ import ( "github.com/ipfs/go-cid" files "github.com/ipfs/go-ipfs-files" - logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-car" "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - "github.com/filecoin-project/lotus/miner" dag "github.com/ipfs/go-merkledag" dstest "github.com/ipfs/go-merkledag/test" unixfile "github.com/ipfs/go-unixfs/file" @@ -34,18 +31,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" ) -var MineNext = miner.MineReq{ - InjectNulls: 0, - Done: func(bool, abi.ChainEpoch, error) {}, -} - -func init() { - logging.SetAllLoggers(logging.LevelInfo) - build.InsecurePoStValidation = true -} - func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") ctx := context.Background() n, sn := b(t, OneFull, OneMiner) @@ -74,7 +60,7 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport } }() - makeDeal(t, ctx, 6, client, miner, carExport, fastRet) + MakeDeal(t, ctx, 6, client, miner, carExport, fastRet) atomic.AddInt64(&mine, -1) fmt.Println("shutting down mining") @@ -82,7 +68,6 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport } func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") ctx := context.Background() n, sn := b(t, OneFull, OneMiner) @@ -112,24 +97,21 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { } }() - makeDeal(t, ctx, 6, client, miner, false, false) - makeDeal(t, ctx, 7, client, miner, false, false) + MakeDeal(t, ctx, 6, client, miner, false, false) + MakeDeal(t, ctx, 7, client, miner, false, false) atomic.AddInt64(&mine, -1) fmt.Println("shutting down mining") <-done } -func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNodeAPI, miner TestStorageNode, carExport, fastRet bool) { - data := make([]byte, 1600) - rand.New(rand.NewSource(int64(rseed))).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) +func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool) { + res, data, err := CreateClientFile(ctx, client, rseed) if err != nil { t.Fatal(err) } + fcid := res.Root fmt.Println("FILE CID: ", fcid) deal := startDeal(t, ctx, miner, client, fcid, fastRet) @@ -145,8 +127,29 @@ func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNod testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data) } +func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) { + data := make([]byte, 1600) + rand.New(rand.NewSource(int64(rseed))).Read(data) + + dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-") + if err != nil { + return nil, nil, err + } + + path := 
filepath.Join(dir, "sourcefile.dat") + err = ioutil.WriteFile(path, data, 0644) + if err != nil { + return nil, nil, err + } + + res, err := client.ClientImport(ctx, api.FileRef{Path: path}) + if err != nil { + return nil, nil, err + } + return res, data, nil +} + func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") ctx := context.Background() n, sn := b(t, OneFull, OneMiner) @@ -201,7 +204,6 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati } func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") ctx := context.Background() n, sn := b(t, OneFull, OneMiner) @@ -276,7 +278,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration <-done } -func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, fcid cid.Cid, fastRet bool) *cid.Cid { +func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool) *cid.Cid { maddr, err := miner.ActorAddress(ctx) if err != nil { t.Fatal(err) @@ -303,7 +305,7 @@ func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client return deal } -func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, deal *cid.Cid, noseal bool) { +func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) { loop: for { di, err := client.ClientGetDealInfo(ctx, *deal) @@ -376,7 +378,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod } } -func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { +func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { offers, err := client.ClientFindData(ctx, fcid, piece) if err != nil { t.Fatal(err) diff --git a/api/test/mining.go b/api/test/mining.go index 8147c224b..11953b95d 100644 --- a/api/test/mining.go +++ b/api/test/mining.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "math/rand" - "os" "sync/atomic" "testing" "time" @@ -59,14 +58,11 @@ func (ts *testSuite) testMiningReal(t *testing.T) { newHeads, err := api.ChainNotify(ctx) require.NoError(t, err) - initHead := (<-newHeads)[0] - if initHead.Val.Height() != 2 { - <-newHeads - } + at := (<-newHeads)[0].Val.Height() h1, err := api.ChainHead(ctx) require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(2), h1.Height()) + require.Equal(t, int64(at), int64(h1.Height())) MineUntilBlock(ctx, t, apis[0], sn[0], nil) require.NoError(t, err) @@ -75,21 +71,19 @@ func (ts *testSuite) testMiningReal(t *testing.T) { h2, err := api.ChainHead(ctx) require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(3), h2.Height()) + require.Greater(t, int64(h2.Height()), int64(h1.Height())) MineUntilBlock(ctx, t, apis[0], sn[0], nil) require.NoError(t, err) <-newHeads - h2, err = api.ChainHead(ctx) + h3, err := api.ChainHead(ctx) require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(4), h2.Height()) + require.Greater(t, int64(h3.Height()), int64(h2.Height())) } func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - // test making a deal with a fresh miner, and see if it starts to mine ctx := context.Background() diff 
--git a/api/test/paych.go b/api/test/paych.go index a8ccebdde..2bcea4369 100644 --- a/api/test/paych.go +++ b/api/test/paych.go @@ -3,14 +3,10 @@ package test import ( "context" "fmt" - "os" "sync/atomic" "testing" "time" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" @@ -22,16 +18,15 @@ import ( "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" ) func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - ctx := context.Background() n, sn := b(t, TwoFull, OneMiner) @@ -58,7 +53,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { bm.MineBlocks() // send some funds to register the receiver - receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1")) + receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -228,7 +223,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { } // wait for the settlement period to pass before collecting - waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych0.SettleDelay) + waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay) creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) if err != nil { @@ -284,7 +279,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec // Add a real block m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{ - To: builtin0.BurntFundsActorAddr, + To: builtin.BurntFundsActorAddr, From: receiverAddr, Value: types.NewInt(0), }, nil) diff --git a/api/test/tape.go b/api/test/tape.go new file mode 100644 index 000000000..466bdd829 --- /dev/null +++ b/api/test/tape.go @@ -0,0 +1,114 @@ +package test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/impl" + "github.com/stretchr/testify/require" +) + +func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) { + t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) }) + t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) }) +} +func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + upgradeSchedule := stmgr.UpgradeSchedule{{ + Network: build.ActorUpgradeNetworkVersion, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }} + if after { + upgradeSchedule = 
append(upgradeSchedule, stmgr.Upgrade{ + Network: network.Version5, + Height: 2, + }) + } + + n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule) + }}}, OneMiner) + + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. + return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + err = miner.PledgeSector(ctx) + require.NoError(t, err) + + // Wait till done. + var sectorNo abi.SectorNumber + for { + s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) == 1 { + sectorNo = s[0] + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + // If before, we expect the precommit to fail + successState := api.SectorState(sealing.CommitFailed) + failureState := api.SectorState(sealing.Proving) + if after { + // otherwise, it should succeed. + successState, failureState = failureState, successState + } + + for { + st, err := miner.SectorsStatus(ctx, sectorNo, false) + require.NoError(t, err) + if st.State == successState { + break + } + require.NotEqual(t, failureState, st.State) + build.Clock.Sleep(100 * time.Millisecond) + fmt.Println("WaitSeal") + } + +} diff --git a/api/test/test.go b/api/test/test.go index 35b397740..bae3d520e 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -2,22 +2,38 @@ package test import ( "context" + "fmt" + "os" "testing" + "time" "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" ) +func init() { + logging.SetAllLoggers(logging.LevelInfo) + err := os.Setenv("BELLMAN_NO_GPU", "1") + if err != nil { + panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) + } + build.InsecurePoStValidation = true +} + type TestNode struct { api.FullNode // ListenAddr is the address on which an API server is listening, if an @@ -74,6 +90,7 @@ func TestApis(t *testing.T, b APIBuilder) { t.Run("testConnectTwo", ts.testConnectTwo) t.Run("testMining", ts.testMining) t.Run("testMiningReal", ts.testMiningReal) + t.Run("testSearchMsg", ts.testSearchMsg) } func DefaultFullOpts(nFull int) []FullNodeOpts { @@ -96,7 +113,8 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { return FullNodeOpts{ Opts: func(nodes []TestNode) node.Option { return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - Network: build.ActorUpgradeNetworkVersion, + // Skip directly to tape height so precommits work. 
+ Network: network.Version5, Height: upgradeHeight, Migration: stmgr.UpgradeActorsV2, }}) @@ -104,6 +122,11 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { } } +var MineNext = miner.MineReq{ + InjectNulls: 0, + Done: func(bool, abi.ChainEpoch, error) {}, +} + func (ts *testSuite) testVersion(t *testing.T) { build.RunningNodeType = build.NodeFull @@ -118,6 +141,49 @@ func (ts *testSuite) testVersion(t *testing.T) { require.Equal(t, v.Version, build.BuildVersion) } +func (ts *testSuite) testSearchMsg(t *testing.T) { + apis, miners := ts.makeNodes(t, OneFull, OneMiner) + + api := apis[0] + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + senderAddr, err := api.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + msg := &types.Message{ + From: senderAddr, + To: senderAddr, + Value: big.Zero(), + } + bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond) + bm.MineBlocks() + defer bm.Stop() + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + t.Fatal(err) + } + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1) + if err != nil { + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatal("did not successfully send message") + } + + searchRes, err := api.StateSearchMsg(ctx, sm.Cid()) + if err != nil { + t.Fatal(err) + } + + if searchRes.TipSet != res.TipSet { + t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) + } + +} + func (ts *testSuite) testID(t *testing.T) { ctx := context.Background() apis, _ := ts.makeNodes(t, OneFull, OneMiner) diff --git a/api/test/window_post.go b/api/test/window_post.go index 28639cda8..55fc4ad70 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -5,7 +5,6 @@ import ( "fmt" "sync/atomic" - "os" "strings" "testing" "time" @@ -24,13 +23,6 @@ import ( "github.com/filecoin-project/lotus/node/impl" ) -func init() { - err := os.Setenv("BELLMAN_NO_GPU", "1") - if err != nil { - panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) - } -} - func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index f3f8b8a55..886ac8e99 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -1,9 +1,12 @@ -/dns4/bootstrap-0.testnet.fildev.network/tcp/1347/p2p/12D3KooWJTUBUjtzWJGWU1XSiY21CwmHaCNLNYn2E7jqHEHyZaP7 -/dns4/bootstrap-1.testnet.fildev.network/tcp/1347/p2p/12D3KooW9yeKXha4hdrJKq74zEo99T8DhriQdWNoojWnnQbsgB3v -/dns4/bootstrap-2.testnet.fildev.network/tcp/1347/p2p/12D3KooWCrx8yVG9U9Kf7w8KLN3Edkj5ZKDhgCaeMqQbcQUoB6CT -/dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34 -/dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T -/dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W +/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj +/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc +/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4 +/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R 
+/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc +/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH +/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ +/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf +/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR /dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C /dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u diff --git a/build/drand.go b/build/drand.go index 73299249a..3b976ac92 100644 --- a/build/drand.go +++ b/build/drand.go @@ -35,6 +35,7 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "https://api.drand.sh", "https://api2.drand.sh", "https://api3.drand.sh", + "https://drand.cloudflare.com", }, Relays: []string{ "/dnsaddr/api.drand.sh/", @@ -68,16 +69,6 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, }, DrandIncentinet: { - Servers: []string{ - "https://pl-eu.incentinet.drand.sh", - "https://pl-us.incentinet.drand.sh", - "https://pl-sin.incentinet.drand.sh", - }, - Relays: []string{ - "/dnsaddr/pl-eu.incentinet.drand.sh/", - "/dnsaddr/pl-us.incentinet.drand.sh/", - "/dnsaddr/pl-sin.incentinet.drand.sh/", - }, ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, }, } diff --git a/build/params_2k.go b/build/params_2k.go index c6538dc08..5a0e8fd61 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -17,9 +17,12 @@ const BreezeGasTampingDuration = 0 const UpgradeSmokeHeight = -1 const UpgradeIgnitionHeight = -2 const UpgradeRefuelHeight = -3 +const UpgradeTapeHeight = -4 var UpgradeActorsV2Height = abi.ChainEpoch(10) -var UpgradeLiftoffHeight = abi.ChainEpoch(-4) +var UpgradeLiftoffHeight = abi.ChainEpoch(-5) + +const UpgradeKumquatHeight = -6 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_testnet.go b/build/params_mainnet.go similarity index 86% rename from build/params_testnet.go rename to build/params_mainnet.go index fe70deaef..94deedfec 100644 --- a/build/params_testnet.go +++ b/build/params_mainnet.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/policy" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ @@ -30,11 +30,15 @@ const UpgradeRefuelHeight = 130800 var UpgradeActorsV2Height = abi.ChainEpoch(138720) +const UpgradeTapeHeight = 140760 + // This signals our tentative epoch for mainnet launch. 
Can make it later, but not earlier. // Miners, clients, developers, custodians all need time to prepare. // We still have upgrades and state changes to do, but can happen after signaling timing here. const UpgradeLiftoffHeight = 148888 +const UpgradeKumquatHeight = 170000 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) policy.SetSupportedProofTypes( @@ -53,6 +57,6 @@ func init() { Devnet = false } -const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds) +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) const PropagationDelaySecs = uint64(6) diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index 40ccca50b..77fd9256d 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -1,36 +1,13 @@ package build import ( - "sort" - "github.com/filecoin-project/go-address" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/filecoin-project/go-state-types/abi" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func DefaultSectorSize() abi.SectorSize { - szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes)) - for spt := range miner0.SupportedProofTypes { - ss, err := spt.SectorSize() - if err != nil { - panic(err) - } - - szs = append(szs, ss) - } - - sort.Slice(szs, func(i, j int) bool { - return szs[i] < szs[j] - }) - - return szs[0] -} - // Core network constants func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) } @@ -39,14 +16,6 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { return protocol.ID("/fil/kad/" + string(netName)) } -func UseNewestNetwork() bool { - // TODO: Put these in a container we can iterate over - if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 && UpgradeActorsV2Height <= 0 { - return true - } - return false -} - func SetAddressNetwork(n address.Network) { address.CurrentNetwork = n } diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 722590575..5070777bd 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -7,12 +7,12 @@ import ( "os" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/policy" - + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" ) // ///// @@ -25,14 +25,14 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version4 +const NewestNetworkVersion = network.Version6 const ActorUpgradeNetworkVersion = network.Version4 // Epochs const ForkLengthThreshold = Finality // Blocks (e) -var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) +var BlocksPerEpoch = uint64(builtin2.ExpectedLeadersPerEpoch) // Epochs const Finality = policy.ChainFinality @@ -116,4 +116,4 @@ const PackingEfficiencyDenom = 5 // Actor consts // TODO: Pull from actors when its made not private -var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) +var MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay) diff --git a/build/params_testground.go b/build/params_testground.go index 6109cbc04..d9893a5f5 100644 --- a/build/params_testground.go +++ 
b/build/params_testground.go @@ -12,7 +12,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" ) @@ -21,14 +22,14 @@ var ( UnixfsChunkSize = uint64(1 << 20) UnixfsLinksPerLevel = 1024 - BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + BlocksPerEpoch = uint64(builtin2.ExpectedLeadersPerEpoch) BlockMessageLimit = 512 BlockGasLimit = int64(100_000_000_000) BlockGasTarget = int64(BlockGasLimit / 2) BaseFeeMaxChangeDenom = int64(8) // 12.5% InitialBaseFee = int64(100e6) MinimumBaseFee = int64(100) - BlockDelaySecs = uint64(builtin.EpochDurationSeconds) + BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) PropagationDelaySecs = uint64(6) AllowableClockDriftSecs = uint64(1) @@ -72,7 +73,7 @@ var ( // Actor consts // TODO: Pull from actors when its made not private - MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) + MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay) PackingEfficiencyNum int64 = 4 PackingEfficiencyDenom int64 = 5 @@ -83,14 +84,16 @@ var ( UpgradeSmokeHeight abi.ChainEpoch = -1 UpgradeIgnitionHeight abi.ChainEpoch = -2 UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 UpgradeActorsV2Height abi.ChainEpoch = 10 - UpgradeLiftoffHeight abi.ChainEpoch = -4 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } - NewestNetworkVersion = network.Version4 + NewestNetworkVersion = network.Version5 ActorUpgradeNetworkVersion = network.Version4 Devnet = true diff --git a/build/version.go b/build/version.go index 5baed1fb7..80977f2f1 100644 --- a/build/version.go +++ b/build/version.go @@ -29,7 +29,7 @@ func buildType() string { } // BuildVersion is the local build version, set by build system -const BuildVersion = "0.9.1" +const BuildVersion = "1.1.2" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit @@ -83,9 +83,9 @@ func VersionForType(nodeType NodeType) (Version, error) { // semver versions of the rpc api exposed var ( - FullAPIVersion = newVer(0, 16, 0) - MinerAPIVersion = newVer(0, 15, 0) - WorkerAPIVersion = newVer(0, 15, 0) + FullAPIVersion = newVer(0, 17, 0) + MinerAPIVersion = newVer(0, 17, 0) + WorkerAPIVersion = newVer(0, 16, 0) ) //nolint:varcheck,deadcode diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go index 1c0726003..a187c9f35 100644 --- a/chain/actors/adt/diff_adt_test.go +++ b/chain/actors/adt/diff_adt_test.go @@ -12,8 +12,9 @@ import ( typegen "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/runtime" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" bstore "github.com/filecoin-project/lotus/lib/blockstore" ) @@ -22,24 +23,24 @@ func TestDiffAdtArray(t *testing.T) { ctxstoreA := newContextStore() ctxstoreB := newContextStore() - arrA := adt0.MakeEmptyArray(ctxstoreA) - arrB := adt0.MakeEmptyArray(ctxstoreB) + arrA := adt2.MakeEmptyArray(ctxstoreA) + arrB := adt2.MakeEmptyArray(ctxstoreB) - require.NoError(t, arrA.Set(0, runtime.CBORBytes([]byte{0}))) // delete + 
require.NoError(t, arrA.Set(0, builtin2.CBORBytes([]byte{0}))) // delete - require.NoError(t, arrA.Set(1, runtime.CBORBytes([]byte{0}))) // modify - require.NoError(t, arrB.Set(1, runtime.CBORBytes([]byte{1}))) + require.NoError(t, arrA.Set(1, builtin2.CBORBytes([]byte{0}))) // modify + require.NoError(t, arrB.Set(1, builtin2.CBORBytes([]byte{1}))) - require.NoError(t, arrA.Set(2, runtime.CBORBytes([]byte{1}))) // delete + require.NoError(t, arrA.Set(2, builtin2.CBORBytes([]byte{1}))) // delete - require.NoError(t, arrA.Set(3, runtime.CBORBytes([]byte{0}))) // noop - require.NoError(t, arrB.Set(3, runtime.CBORBytes([]byte{0}))) + require.NoError(t, arrA.Set(3, builtin2.CBORBytes([]byte{0}))) // noop + require.NoError(t, arrB.Set(3, builtin2.CBORBytes([]byte{0}))) - require.NoError(t, arrA.Set(4, runtime.CBORBytes([]byte{0}))) // modify - require.NoError(t, arrB.Set(4, runtime.CBORBytes([]byte{6}))) + require.NoError(t, arrA.Set(4, builtin2.CBORBytes([]byte{0}))) // modify + require.NoError(t, arrB.Set(4, builtin2.CBORBytes([]byte{6}))) - require.NoError(t, arrB.Set(5, runtime.CBORBytes{8})) // add - require.NoError(t, arrB.Set(6, runtime.CBORBytes{9})) // add + require.NoError(t, arrB.Set(5, builtin2.CBORBytes{8})) // add + require.NoError(t, arrB.Set(6, builtin2.CBORBytes{9})) // add changes := new(TestDiffArray) @@ -76,24 +77,24 @@ func TestDiffAdtMap(t *testing.T) { ctxstoreA := newContextStore() ctxstoreB := newContextStore() - mapA := adt0.MakeEmptyMap(ctxstoreA) - mapB := adt0.MakeEmptyMap(ctxstoreB) + mapA := adt2.MakeEmptyMap(ctxstoreA) + mapB := adt2.MakeEmptyMap(ctxstoreB) - require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete + require.NoError(t, mapA.Put(abi.UIntKey(0), builtin2.CBORBytes([]byte{0}))) // delete - require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify - require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1}))) + require.NoError(t, mapA.Put(abi.UIntKey(1), builtin2.CBORBytes([]byte{0}))) // modify + require.NoError(t, mapB.Put(abi.UIntKey(1), builtin2.CBORBytes([]byte{1}))) - require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete + require.NoError(t, mapA.Put(abi.UIntKey(2), builtin2.CBORBytes([]byte{1}))) // delete - require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop - require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) + require.NoError(t, mapA.Put(abi.UIntKey(3), builtin2.CBORBytes([]byte{0}))) // noop + require.NoError(t, mapB.Put(abi.UIntKey(3), builtin2.CBORBytes([]byte{0}))) - require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify - require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6}))) + require.NoError(t, mapA.Put(abi.UIntKey(4), builtin2.CBORBytes([]byte{0}))) // modify + require.NoError(t, mapB.Put(abi.UIntKey(4), builtin2.CBORBytes([]byte{6}))) - require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add - require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add + require.NoError(t, mapB.Put(abi.UIntKey(5), builtin2.CBORBytes{8})) // add + require.NoError(t, mapB.Put(abi.UIntKey(6), builtin2.CBORBytes{9})) // add changes := new(TestDiffMap) @@ -144,7 +145,7 @@ func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) { } func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error { - v := new(runtime.CBORBytes) + v := new(builtin2.CBORBytes) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) if err != 
nil { return err @@ -161,13 +162,13 @@ func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error { } func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error { - vFrom := new(runtime.CBORBytes) + vFrom := new(builtin2.CBORBytes) err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)) if err != nil { return err } - vTo := new(runtime.CBORBytes) + vTo := new(builtin2.CBORBytes) err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw)) if err != nil { return err @@ -194,7 +195,7 @@ func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error { } func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error { - v := new(runtime.CBORBytes) + v := new(builtin2.CBORBytes) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) if err != nil { return err @@ -212,7 +213,7 @@ func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error { type adtMapDiffResult struct { key uint64 - val runtime.CBORBytes + val builtin2.CBORBytes } type TestAdtMapDiffModified struct { @@ -222,7 +223,7 @@ type TestAdtMapDiffModified struct { type adtArrayDiffResult struct { key uint64 - val runtime.CBORBytes + val builtin2.CBORBytes } type TestDiffArray struct { @@ -239,7 +240,7 @@ type TestAdtArrayDiffModified struct { } func (t *TestDiffArray) Add(key uint64, val *typegen.Deferred) error { - v := new(runtime.CBORBytes) + v := new(builtin2.CBORBytes) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) if err != nil { return err @@ -252,13 +253,13 @@ func (t *TestDiffArray) Add(key uint64, val *typegen.Deferred) error { } func (t *TestDiffArray) Modify(key uint64, from, to *typegen.Deferred) error { - vFrom := new(runtime.CBORBytes) + vFrom := new(builtin2.CBORBytes) err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)) if err != nil { return err } - vTo := new(runtime.CBORBytes) + vTo := new(builtin2.CBORBytes) err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw)) if err != nil { return err @@ -280,7 +281,7 @@ func (t *TestDiffArray) Modify(key uint64, from, to *typegen.Deferred) error { } func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error { - v := new(runtime.CBORBytes) + v := new(builtin2.CBORBytes) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) if err != nil { return err diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 7b1b2a792..38ed2654b 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -24,6 +24,8 @@ func init() { }) } +var Methods = builtin2.MethodsAccount + func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { case builtin0.AccountActorCodeID: diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index cb24a2c33..afba8efe8 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -2,28 +2,46 @@ package builtin import ( "github.com/filecoin-project/go-address" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" "github.com/ipfs/go-cid" "golang.org/x/xerrors" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" proof0 
"github.com/filecoin-project/specs-actors/actors/runtime/proof" smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" ) var SystemActorAddr = builtin0.SystemActorAddr var BurntFundsActorAddr = builtin0.BurntFundsActorAddr +var CronActorAddr = builtin0.CronActorAddr +var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") +var ( + ExpectedLeadersPerEpoch = builtin0.ExpectedLeadersPerEpoch +) + +const ( + EpochDurationSeconds = builtin0.EpochDurationSeconds + EpochsInDay = builtin0.EpochsInDay + SecondsInDay = builtin0.SecondsInDay +) + +const ( + MethodSend = builtin2.MethodSend + MethodConstructor = builtin2.MethodConstructor +) + // TODO: Why does actors have 2 different versions of this? type SectorInfo = proof0.SectorInfo type PoStProof = proof0.PoStProof diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go new file mode 100644 index 000000000..65bfd992f --- /dev/null +++ b/chain/actors/builtin/cron/cron.go @@ -0,0 +1,10 @@ +package cron + +import ( + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +) + +var ( + Address = builtin2.CronActorAddr + Methods = builtin2.MethodsCron +) diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 5777bb890..60dbdf4fe 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -26,7 +26,10 @@ func init() { }) } -var Address = builtin0.InitActorAddr +var ( + Address = builtin2.InitActorAddr + Methods = builtin2.MethodsInit +) func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index fd08a0119..195ca40b9 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -27,7 +27,10 @@ func init() { }) } -var Address = builtin0.StorageMarketActorAddr +var ( + Address = builtin2.StorageMarketActorAddr + Methods = builtin2.MethodsMarket +) func Load(store adt.Store, act *types.Actor) (st State, err error) { switch act.Code { diff --git a/chain/actors/builtin/miner/diff_deadlines.go b/chain/actors/builtin/miner/diff_deadlines.go index e1e839960..7d686ece5 100644 --- a/chain/actors/builtin/miner/diff_deadlines.go +++ b/chain/actors/builtin/miner/diff_deadlines.go @@ -7,9 +7,9 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" ) -type DeadlinesDiff map[uint64]*DeadlineDiff +type DeadlinesDiff map[uint64]DeadlineDiff -func DiffDeadlines(pre, cur State) (*DeadlinesDiff, error) { +func DiffDeadlines(pre, cur State) (DeadlinesDiff, error) { changed, err := pre.DeadlinesChanged(cur) if err != nil { return nil, err @@ -18,11 +18,7 @@ func DiffDeadlines(pre, cur State) (*DeadlinesDiff, error) { return nil, nil } - numDl, err := pre.NumDeadlines() - if err != nil { - return nil, err - } - dlDiff := make(DeadlinesDiff, numDl) + dlDiff := make(DeadlinesDiff) if err := pre.ForEachDeadline(func(idx uint64, preDl Deadline) error { curDl, err := cur.LoadDeadline(idx) if err != nil { @@ -39,12 +35,12 @@ func DiffDeadlines(pre, cur State) (*DeadlinesDiff, error) { }); err != nil { return nil, err } - return &dlDiff, nil + return dlDiff, nil } type DeadlineDiff map[uint64]*PartitionDiff -func DiffDeadline(pre, cur Deadline) (*DeadlineDiff, error) { +func 
DiffDeadline(pre, cur Deadline) (DeadlineDiff, error) { changed, err := pre.PartitionsChanged(cur) if err != nil { return nil, err @@ -104,7 +100,7 @@ func DiffDeadline(pre, cur Deadline) (*DeadlineDiff, error) { return nil, err } - return &partDiff, nil + return partDiff, nil } type PartitionDiff struct { diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 8649d4351..5821d092b 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -1,6 +1,7 @@ package miner import ( + "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" @@ -30,6 +31,8 @@ func init() { }) } +var Methods = builtin2.MethodsMiner + // Unchanged between v0 and v2 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines @@ -74,6 +77,7 @@ type State interface { DeadlinesChanged(State) (bool, error) Info() (MinerInfo, error) + MinerInfoChanged(State) (bool, error) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) @@ -148,6 +152,20 @@ type MinerInfo struct { ConsensusFaultElapsed abi.ChainEpoch } +func (mi MinerInfo) IsController(addr address.Address) bool { + if addr == mi.Owner || addr == mi.Worker { + return true + } + + for _, ca := range mi.ControlAddresses { + if addr == ca { + return true + } + } + + return false +} + type SectorExpiration struct { OnTime abi.ChainEpoch @@ -182,3 +200,7 @@ type LockedFunds struct { InitialPledgeRequirement abi.TokenAmount PreCommitDeposits abi.TokenAmount } + +func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount { + return big.Add(lf.VestingFunds, big.Add(lf.InitialPledgeRequirement, lf.PreCommitDeposits)) +} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 7e71c7611..69160e4e0 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -47,8 +47,16 @@ type partition0 struct { store adt.Store } -func (s *state0) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) { - return s.GetAvailableBalance(bal), nil +func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available = s.GetAvailableBalance(bal) + return available, err } func (s *state0) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { @@ -266,7 +274,16 @@ func (s *state0) DeadlinesChanged(other State) (bool, error) { return true, nil } - return s.State.Deadlines.Equals(other0.Deadlines), nil + return !s.State.Deadlines.Equals(other0.Deadlines), nil +} + +func (s *state0) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil } func (s *state0) Info() (MinerInfo, error) { @@ -362,7 +379,7 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) { return true, nil } - return d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil + return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil } func (d *deadline0) PostSubmissions() (bitfield.BitField, error) { diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 
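Two behavioural fixes in v0.go are worth calling out: DeadlinesChanged and PartitionsChanged previously returned Equals(...) where they meant !Equals(...), and AvailableBalance now converts the panic thrown by GetAvailableBalance (when locked funds exceed the balance) into an error via a named return plus defer/recover. A generic, self-contained version of that recover pattern, with the specs-actors call mocked out:

```go
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

// getAvailable panics when funds are insufficient, standing in for the
// specs-actors GetAvailableBalance call that the state accessor wraps.
func getAvailable(balance, locked int64) int64 {
	if balance < locked {
		panic("negative available balance")
	}
	return balance - locked
}

// availableBalance shows the defer/recover shape used in state0/state2: a
// named return lets the deferred function turn a panic into an error value.
func availableBalance(balance, locked int64) (available int64, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = xerrors.Errorf("failed to get available balance: %v", r)
			available = 0
		}
	}()
	available = getAvailable(balance, locked)
	return available, err
}

func main() {
	fmt.Println(availableBalance(10, 3)) // 7 <nil>
	fmt.Println(availableBalance(3, 10)) // 0 failed to get available balance: ...
}
```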
eed82257f..2c67484ea 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -45,8 +45,16 @@ type partition2 struct { store adt.Store } -func (s *state2) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) { - return s.GetAvailableBalance(bal) +func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err } func (s *state2) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { @@ -265,7 +273,16 @@ func (s *state2) DeadlinesChanged(other State) (bool, error) { return true, nil } - return s.State.Deadlines.Equals(other2.Deadlines), nil + return !s.State.Deadlines.Equals(other2.Deadlines), nil +} + +func (s *state2) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil } func (s *state2) Info() (MinerInfo, error) { @@ -361,7 +378,7 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) { return true, nil } - return d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil + return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil } func (d *deadline2) PostSubmissions() (bitfield.BitField, error) { diff --git a/chain/actors/builtin/multisig/message.go b/chain/actors/builtin/multisig/message.go index b19287432..3d2c66e6b 100644 --- a/chain/actors/builtin/multisig/message.go +++ b/chain/actors/builtin/multisig/message.go @@ -9,12 +9,15 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" ) +var Methods = builtin2.MethodsMultisig + func Message(version actors.Version, from address.Address) MessageBuilder { switch version { case actors.Version0: @@ -45,6 +48,7 @@ type MessageBuilder interface { // this type is the same between v0 and v2 type ProposalHashData = multisig2.ProposalHashData +type ProposeReturn = multisig2.ProposeReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)} diff --git a/chain/actors/builtin/paych/message.go b/chain/actors/builtin/paych/message.go index 23b360394..5709d4b23 100644 --- a/chain/actors/builtin/paych/message.go +++ b/chain/actors/builtin/paych/message.go @@ -7,8 +7,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) +var Methods = builtin2.MethodsPaych + func Message(version actors.Version, from address.Address) MessageBuilder { switch version { case actors.Version0: diff --git a/chain/actors/builtin/power/diff.go b/chain/actors/builtin/power/diff.go new file mode 100644 index 000000000..3daa70569 --- /dev/null +++ b/chain/actors/builtin/power/diff.go @@ -0,0 +1,117 @@ +package power + +import ( + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +type ClaimChanges struct { + Added []ClaimInfo + Modified []ClaimModification + Removed []ClaimInfo +} + +type ClaimModification struct { + Miner address.Address + From Claim + To Claim +} + +type ClaimInfo struct { + Miner address.Address + Claim Claim +} + +func DiffClaims(pre, cur State) (*ClaimChanges, error) { + results := new(ClaimChanges) + + prec, err := pre.claims() + if err != nil { + return nil, err + } + + curc, err := cur.claims() + if err != nil { + return nil, err + } + + if err := adt.DiffAdtMap(prec, curc, &claimDiffer{results, pre, cur}); err != nil { + return nil, err + } + + return results, nil +} + +type claimDiffer struct { + Results *ClaimChanges + pre, after State +} + +func (c *claimDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (c *claimDiffer) Add(key string, val *cbg.Deferred) error { + ci, err := c.after.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Added = append(c.Results.Added, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} + +func (c *claimDiffer) Modify(key string, from, to *cbg.Deferred) error { + ciFrom, err := c.pre.decodeClaim(from) + if err != nil { + return err + } + + ciTo, err := c.after.decodeClaim(to) + if err != nil { + return err + } + + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + if ciFrom != ciTo { + c.Results.Modified = append(c.Results.Modified, ClaimModification{ + Miner: addr, + From: ciFrom, + To: ciTo, + }) + } + return nil +} + +func (c *claimDiffer) Remove(key string, val *cbg.Deferred) error { + ci, err := c.after.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Removed = append(c.Results.Removed, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index e683cfd96..e0cf0d700 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -2,7 +2,9 @@ package power import ( "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -25,7 +27,10 @@ func init() { }) } -var Address = builtin0.StoragePowerActorAddr +var ( + Address = builtin2.StoragePowerActorAddr + Methods = builtin2.MethodsPower +) func Load(store adt.Store, act *types.Actor) (st State, err error) { switch act.Code { @@ -51,6 +56,12 @@ type State interface { MinerPower(address.Address) (Claim, bool, error) MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) ListAllMiners() ([]address.Address, error) + ForEachClaim(func(miner address.Address, claim Claim) error) error + ClaimsChanged(State) (bool, error) + + // Diff helpers. Used by Diff* functions internally. + claims() (adt.Map, error) + decodeClaim(*cbg.Deferred) (Claim, error) } type Claim struct { @@ -60,3 +71,10 @@ type Claim struct { // Sum of quality adjusted power for a miner's sectors. 
QualityAdjPower abi.StoragePower } + +func AddClaims(a Claim, b Claim) Claim { + return Claim{ + RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), + QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), + } +} diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go index e2a9cf382..7636b612b 100644 --- a/chain/actors/builtin/power/v0.go +++ b/chain/actors/builtin/power/v0.go @@ -1,9 +1,12 @@ package power import ( + "bytes" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -96,3 +99,47 @@ func (s *state0) ListAllMiners() ([]address.Address, error) { return miners, nil } + +func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := adt0.AsMap(s.store, s.Claims) + if err != nil { + return err + } + + var claim power0.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state0) ClaimsChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other0.State.Claims), nil +} + +func (s *state0) claims() (adt.Map, error) { + return adt0.AsMap(s.store, s.Claims) +} + +func (s *state0) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power0.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV0Claim(ci), nil +} + +func fromV0Claim(v0 power0.Claim) Claim { + return (Claim)(v0) +} diff --git a/chain/actors/builtin/power/v2.go b/chain/actors/builtin/power/v2.go index 6346a09b6..012dc2a4f 100644 --- a/chain/actors/builtin/power/v2.go +++ b/chain/actors/builtin/power/v2.go @@ -1,9 +1,12 @@ package power import ( + "bytes" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -96,3 +99,50 @@ func (s *state2) ListAllMiners() ([]address.Address, error) { return miners, nil } + +func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := adt2.AsMap(s.store, s.Claims) + if err != nil { + return err + } + + var claim power2.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state2) ClaimsChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other2.State.Claims), nil +} + +func (s *state2) claims() (adt.Map, error) { + return adt2.AsMap(s.store, s.Claims) +} + +func (s *state2) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power2.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV2Claim(ci), nil +} + +func fromV2Claim(v2 power2.Claim) Claim { + return 
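With ForEachClaim and AddClaims in place, summing power across miners becomes a simple fold. A hedged sketch against the State interface as defined above (illustrative code, not part of the PR):

```go
package powerutil

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
)

// totalClaimedPower folds every miner's claim into a network-wide total using
// the ForEachClaim accessor and AddClaims helper introduced above.
func totalClaimedPower(st power.State) (power.Claim, error) {
	total := power.Claim{
		RawBytePower:    big.Zero(),
		QualityAdjPower: big.Zero(),
	}
	err := st.ForEachClaim(func(miner address.Address, claim power.Claim) error {
		total = power.AddClaims(total, claim)
		return nil
	})
	return total, err
}
```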
Claim{ + RawBytePower: v2.RawBytePower, + QualityAdjPower: v2.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 065f242e2..952ca270b 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -24,7 +24,10 @@ func init() { }) } -var Address = builtin0.RewardActorAddr +var ( + Address = builtin2.RewardActorAddr + Methods = builtin2.MethodsReward +) func Load(store adt.Store, act *types.Actor) (st State, err error) { switch act.Code { diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 204cdae95..a4468d8a0 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -24,7 +24,10 @@ func init() { }) } -var Address = builtin0.VerifiedRegistryActorAddr +var ( + Address = builtin2.VerifiedRegistryActorAddr + Methods = builtin2.MethodsVerifiedRegistry +) func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index ba09e4424..c1a971db5 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -1,6 +1,8 @@ package policy import ( + "sort" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors" @@ -11,12 +13,14 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" ) const ( ChainFinality = miner0.ChainFinality SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych2.SettleDelay ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -114,3 +118,35 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { return ChainFinality } + +func GetMaxSectorExpirationExtension() abi.ChainEpoch { + return miner0.MaxSectorExpirationExtension +} + +// TODO: we'll probably need to abstract over this better in the future. +func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { + sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p) + if err != nil { + return 0, err + } + return int(miner2.AddressedSectorsMax / sectorsPerPart), nil +} + +func GetDefaultSectorSize() abi.SectorSize { + // supported proof types are the same across versions. 
+ szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes)) + for spt := range miner2.SupportedProofTypes { + ss, err := spt.SectorSize() + if err != nil { + panic(err) + } + + szs = append(szs, ss) + } + + sort.Slice(szs, func(i, j int) bool { + return szs[i] < szs[j] + }) + + return szs[0] +} diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go index 62e7f8964..af600cc75 100644 --- a/chain/actors/policy/policy_test.go +++ b/chain/actors/policy/policy_test.go @@ -6,9 +6,13 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" ) @@ -42,9 +46,25 @@ func TestSupportedProofTypes(t *testing.T) { func TestAssumptions(t *testing.T) { require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes) require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay) + require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension) require.Equal(t, miner0.ChainFinality, miner2.ChainFinality) require.Equal(t, miner0.WPoStChallengeWindow, miner2.WPoStChallengeWindow) require.Equal(t, miner0.WPoStProvingPeriod, miner2.WPoStProvingPeriod) require.Equal(t, miner0.WPoStPeriodDeadlines, miner2.WPoStPeriodDeadlines) + require.Equal(t, miner0.AddressedSectorsMax, miner2.AddressedSectorsMax) + require.Equal(t, paych0.SettleDelay, paych2.SettleDelay) require.True(t, verifreg0.MinVerifiedDealSize.Equals(verifreg2.MinVerifiedDealSize)) } + +func TestPartitionSizes(t *testing.T) { + for p := range abi.PoStSealProofTypes { + sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p) + require.NoError(t, err) + sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p) + if err != nil { + // new proof type. 
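GetMaxPoStPartitions divides the per-message sector cap by the partition size for the given proof, and GetDefaultSectorSize sorts the supported sizes and returns the smallest. A toy calculation with illustrative constants (the authoritative values are miner.AddressedSectorsMax and the per-proof partition sizes in specs-actors):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Illustrative constants only; real values come from specs-actors.
	const addressedSectorsMax = 10_000
	const sectorsPerPartition = 2349 // e.g. a 32 GiB window PoSt partition

	// GetMaxPoStPartitions: how many partitions fit in one message.
	fmt.Println(addressedSectorsMax / sectorsPerPartition) // 4

	// GetDefaultSectorSize: sort the supported sizes and take the smallest.
	szs := []uint64{64 << 30, 32 << 30}
	sort.Slice(szs, func(i, j int) bool { return szs[i] < szs[j] })
	fmt.Println(szs[0]) // 34359738368 (32 GiB)
}
```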
+ continue + } + require.Equal(t, sizeOld, sizeNew) + } +} diff --git a/chain/actors/version.go b/chain/actors/version.go index 17af8b08b..fe16d521e 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -18,7 +18,7 @@ func VersionForNetwork(version network.Version) Version { switch version { case network.Version0, network.Version1, network.Version2, network.Version3: return Version0 - case network.Version4: + case network.Version4, network.Version5, network.Version6: return Version2 default: panic(fmt.Sprintf("unsupported network version %d", version)) diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 6e8e83a20..4abc12d29 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -65,6 +65,11 @@ type DrandBeacon struct { localCache map[uint64]types.BeaconEntry } +// DrandHTTPClient interface overrides the user agent used by drand +type DrandHTTPClient interface { + SetUserAgent(string) +} + func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { if genesisTs == 0 { panic("what are you doing this cant be zero") @@ -84,6 +89,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes if err != nil { return nil, xerrors.Errorf("could not create http drand client: %w", err) } + hc.(DrandHTTPClient).SetUserAgent("drand-client-lotus/" + build.BuildVersion) clients = append(clients, hc) } @@ -92,7 +98,6 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes dclient.WithChainInfo(drandChain), dclient.WithCacheSize(1024), dclient.WithLogger(dlogger), - dclient.WithAutoWatch(), } if ps != nil { diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index 0cb9c2ba8..d66ee7b54 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -12,7 +12,7 @@ import ( ) func TestPrintGroupInfo(t *testing.T) { - server := build.DrandConfigs[build.DrandIncentinet].Servers[0] + server := build.DrandConfigs[build.DrandDevnet].Servers[0] c, err := hclient.New(server, nil, nil) assert.NoError(t, err) cg := c.(interface { diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 753206093..7f39e9038 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -459,7 +459,7 @@ type messageEvents struct { hcAPI headChangeAPI lk sync.RWMutex - matchers map[triggerID][]MsgMatchFunc + matchers map[triggerID]MsgMatchFunc } func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents { @@ -467,7 +467,7 @@ func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) mes ctx: ctx, cs: cs, hcAPI: hcAPI, - matchers: map[triggerID][]MsgMatchFunc{}, + matchers: make(map[triggerID]MsgMatchFunc), } } @@ -482,32 +482,23 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat me.lk.RLock() defer me.lk.RUnlock() + // For each message in the tipset res := make(map[triggerID]eventData) me.messagesForTs(pts, func(msg *types.Message) { // TODO: provide receipts - for tid, matchFns := range me.matchers { - var matched bool - var once bool - for _, matchFn := range matchFns { - matchOne, ok, err := matchFn(msg) - if err != nil { - log.Errorf("event matcher failed: %s", err) - continue - } - matched = ok - once = matchOne - - if matched { - break - } + // Run each trigger's matcher against the message + for tid, matchFn := range me.matchers { + matched, err := matchFn(msg) + if err != nil { + 
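NewDrandBeacon now stamps a lotus-specific user agent by asserting the drand HTTP client to a small optional interface. The same Go pattern in isolation, here using a comma-ok assertion so a client without the method is simply skipped (the PR uses a direct assertion):

```go
package main

import "fmt"

// userAgentSetter mirrors the optional capability DrandHTTPClient describes;
// callers type-assert to it rather than depending on the concrete client type.
type userAgentSetter interface {
	SetUserAgent(string)
}

type httpClient struct{ ua string }

func (h *httpClient) SetUserAgent(ua string) { h.ua = ua }

func configure(c interface{}, version string) {
	// Same shape as hc.(DrandHTTPClient).SetUserAgent(...) in NewDrandBeacon.
	if s, ok := c.(userAgentSetter); ok {
		s.SetUserAgent("drand-client-lotus/" + version)
	}
}

func main() {
	c := &httpClient{}
	configure(c, "1.1.2")
	fmt.Println(c.ua)
}
```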
log.Errorf("event matcher failed: %s", err) + continue } + // If there was a match, include the message in the results for the + // trigger if matched { res[tid] = msg - if once { - break - } } } }) @@ -555,7 +546,7 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes // `curH`-`ts.Height` = `confidence` type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) -type MsgMatchFunc func(msg *types.Message) (matchOnce bool, matched bool, err error) +type MsgMatchFunc func(msg *types.Message) (matched bool, err error) // Called registers a callback which is triggered when a specified method is // called on an actor, or a timeout is reached. @@ -607,7 +598,7 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa me.lk.Lock() defer me.lk.Unlock() - me.matchers[id] = append(me.matchers[id], mf) + me.matchers[id] = mf return nil } diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 0e4fd34b2..3957f425c 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -572,9 +572,9 @@ func TestAtChainedConfidenceNull(t *testing.T) { require.Equal(t, false, reverted) } -func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matchOnce bool, matched bool, err error) { - return func(msg *types.Message) (matchOnce bool, matched bool, err error) { - return true, to == msg.To && m == msg.Method, nil +func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matched bool, err error) { + return func(msg *types.Message) (matched bool, err error) { + return to == msg.To && m == msg.Method, nil } } diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go new file mode 100644 index 000000000..4e8bcc94d --- /dev/null +++ b/chain/events/state/mock/api.go @@ -0,0 +1,69 @@ +package test + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type MockAPI struct { + bs blockstore.Blockstore + + lk sync.Mutex + ts map[types.TipSetKey]*types.Actor + stateGetActorCalled int +} + +func NewMockAPI(bs blockstore.Blockstore) *MockAPI { + return &MockAPI{ + bs: bs, + ts: make(map[types.TipSetKey]*types.Actor), + } +} + +func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return m.bs.Has(c) +} + +func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + blk, err := m.bs.Get(c) + if err != nil { + return nil, xerrors.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} + +func (m *MockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled++ + return m.ts[tsk], nil +} + +func (m *MockAPI) StateGetActorCallCount() int { + m.lk.Lock() + defer m.lk.Unlock() + + return m.stateGetActorCalled +} + +func (m *MockAPI) ResetCallCounts() { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled = 0 +} + +func (m *MockAPI) SetActor(tsk types.TipSetKey, act *types.Actor) { + m.lk.Lock() + defer m.lk.Unlock() + + m.ts[tsk] = act +} diff --git a/chain/events/state/mock/state.go b/chain/events/state/mock/state.go new file mode 100644 index 000000000..bac06b59f --- /dev/null +++ b/chain/events/state/mock/state.go @@ -0,0 +1,32 @@ 
+package test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + "github.com/stretchr/testify/require" +) + +func CreateEmptyMarketState(t *testing.T, store adt.Store) *market.State { + emptyArrayCid, err := adt.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt.MakeEmptyMap(store).Root() + require.NoError(t, err) + return market.ConstructState(emptyArrayCid, emptyMap, emptyMap) +} + +func CreateDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState) cid.Cid { + root := adt.MakeEmptyArray(store) + for dealID, dealState := range deals { + err := root.Set(uint64(dealID), dealState) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} diff --git a/chain/events/state/mock/tipset.go b/chain/events/state/mock/tipset.go new file mode 100644 index 000000000..39d42d6e5 --- /dev/null +++ b/chain/events/state/mock/tipset.go @@ -0,0 +1,27 @@ +package test + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +func MockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { + return types.NewTipSet([]*types.BlockHeader{{ + Miner: minerAddr, + Height: 5, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + Timestamp: timestamp, + }}) +} diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 461ac4997..8fc93d9cd 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -4,28 +4,26 @@ import ( "context" "testing" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/go-bitfield" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - "github.com/ipfs/go-cid" cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/crypto" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" - - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/util/adt" - tutils "github.com/filecoin-project/specs-actors/support/testing" - "github.com/filecoin-project/lotus/chain/types" bstore "github.com/filecoin-project/lotus/lib/blockstore" ) @@ 
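The new chain/events/state/mock package extracts the test fixtures (MockAPI, MockTipset, CreateEmptyMarketState, CreateDealAMT) so other test suites can reuse them. A hedged sketch of how a test might wire them together, mirroring the calls that appear in predicates_test.go below:

```go
package state_test

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-address"
	"github.com/stretchr/testify/require"

	test "github.com/filecoin-project/lotus/chain/events/state/mock"
	"github.com/filecoin-project/lotus/chain/types"
	bstore "github.com/filecoin-project/lotus/lib/blockstore"
)

func TestMockAPIRoundTrip(t *testing.T) {
	bs := bstore.NewTemporarySync()
	api := test.NewMockAPI(bs)

	minerAddr, err := address.NewFromString("t00")
	require.NoError(t, err)

	ts, err := test.MockTipset(minerAddr, 1)
	require.NoError(t, err)

	// Register an actor under the tipset key so StateGetActor can return it.
	api.SetActor(ts.Key(), &types.Actor{})

	act, err := api.StateGetActor(context.Background(), minerAddr, ts.Key())
	require.NoError(t, err)
	require.NotNil(t, act)
	require.Equal(t, 1, api.StateGetActorCallCount())
}
```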
-36,60 +34,27 @@ func init() { dummyCid, _ = cid.Parse("bafkqaaa") } -type mockAPI struct { - ts map[types.TipSetKey]*types.Actor - bs bstore.Blockstore -} - -func newMockAPI(bs bstore.Blockstore) *mockAPI { - return &mockAPI{ - bs: bs, - ts: make(map[types.TipSetKey]*types.Actor), - } -} - -func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) -} - -func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - blk, err := m.bs.Get(c) - if err != nil { - return nil, xerrors.Errorf("blockstore get: %w", err) - } - - return blk.RawData(), nil -} - -func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - return m.ts[tsk], nil -} - -func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) { - m.ts[tsk] = act -} - func TestMarketPredicates(t *testing.T) { ctx := context.Background() bs := bstore.NewTemporarySync() - store := adt.WrapStore(ctx, cbornode.NewCborStore(bs)) + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) - oldDeal1 := &market0.DealState{ + oldDeal1 := &market2.DealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 0, } - oldDeal2 := &market0.DealState{ + oldDeal2 := &market2.DealState{ SectorStartEpoch: 4, LastUpdatedEpoch: 5, SlashEpoch: 0, } - oldDeals := map[abi.DealID]*market0.DealState{ + oldDeals := map[abi.DealID]*market2.DealState{ abi.DealID(1): oldDeal1, abi.DealID(2): oldDeal2, } - oldProp1 := &market0.DealProposal{ + oldProp1 := &market2.DealProposal{ PieceCID: dummyCid, PieceSize: 0, VerifiedDeal: false, @@ -101,7 +66,7 @@ func TestMarketPredicates(t *testing.T) { ProviderCollateral: big.Zero(), ClientCollateral: big.Zero(), } - oldProp2 := &market0.DealProposal{ + oldProp2 := &market2.DealProposal{ PieceCID: dummyCid, PieceSize: 0, VerifiedDeal: false, @@ -113,7 +78,7 @@ func TestMarketPredicates(t *testing.T) { ProviderCollateral: big.Zero(), ClientCollateral: big.Zero(), } - oldProps := map[abi.DealID]*market0.DealProposal{ + oldProps := map[abi.DealID]*market2.DealProposal{ abi.DealID(1): oldProp1, abi.DealID(2): oldProp2, } @@ -127,7 +92,7 @@ func TestMarketPredicates(t *testing.T) { oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances) - newDeal1 := &market0.DealState{ + newDeal1 := &market2.DealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 3, SlashEpoch: 0, @@ -136,19 +101,19 @@ func TestMarketPredicates(t *testing.T) { // deal 2 removed // added - newDeal3 := &market0.DealState{ + newDeal3 := &market2.DealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 3, } - newDeals := map[abi.DealID]*market0.DealState{ + newDeals := map[abi.DealID]*market2.DealState{ abi.DealID(1): newDeal1, // deal 2 was removed abi.DealID(3): newDeal3, } // added - newProp3 := &market0.DealProposal{ + newProp3 := &market2.DealProposal{ PieceCID: dummyCid, PieceSize: 0, VerifiedDeal: false, @@ -160,7 +125,7 @@ func TestMarketPredicates(t *testing.T) { ProviderCollateral: big.Zero(), ClientCollateral: big.Zero(), } - newProps := map[abi.DealID]*market0.DealProposal{ + newProps := map[abi.DealID]*market2.DealProposal{ abi.DealID(1): oldProp1, // 1 was persisted // prop 2 was removed abi.DealID(3): newProp3, // new @@ -177,14 +142,14 @@ func TestMarketPredicates(t *testing.T) { minerAddr, err := address.NewFromString("t00") require.NoError(t, err) - oldState, err := mockTipset(minerAddr, 1) + oldState, err := test.MockTipset(minerAddr, 1) require.NoError(t, err) - newState, err := 
mockTipset(minerAddr, 2) + newState, err := test.MockTipset(minerAddr, 2) require.NoError(t, err) - api := newMockAPI(bs) - api.setActor(oldState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: oldStateC}) - api.setActor(newState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: newStateC}) + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC}) + api.SetActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC}) t.Run("deal ID predicate", func(t *testing.T) { preds := NewStatePredicates(api) @@ -239,11 +204,11 @@ func TestMarketPredicates(t *testing.T) { t.Fatal("No state change so this should not be called") return false, nil, nil }) - marketState0 := createEmptyMarketState(t, store) + marketState0 := test.CreateEmptyMarketState(t, store) marketCid, err := store.Put(ctx, marketState0) require.NoError(t, err) marketState, err := market.Load(store, &types.Actor{ - Code: builtin0.StorageMarketActorCodeID, + Code: builtin2.StorageMarketActorCodeID, Head: marketCid, }) require.NoError(t, err) @@ -352,11 +317,11 @@ func TestMarketPredicates(t *testing.T) { t.Fatal("No state change so this should not be called") return false, nil, nil }) - marketState0 := createEmptyMarketState(t, store) + marketState0 := test.CreateEmptyMarketState(t, store) marketCid, err := store.Put(ctx, marketState0) require.NoError(t, err) marketState, err := market.Load(store, &types.Actor{ - Code: builtin0.StorageMarketActorCodeID, + Code: builtin2.StorageMarketActorCodeID, Head: marketCid, }) require.NoError(t, err) @@ -370,7 +335,7 @@ func TestMarketPredicates(t *testing.T) { func TestMinerSectorChange(t *testing.T) { ctx := context.Background() bs := bstore.NewTemporarySync() - store := adt.WrapStore(ctx, cbornode.NewCborStore(bs)) + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) nextID := uint64(0) nextIDAddrF := func() address.Address { @@ -379,12 +344,12 @@ func TestMinerSectorChange(t *testing.T) { } owner, worker := nextIDAddrF(), nextIDAddrF() - si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner0.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10)) - si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner0.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11)) - si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner0.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11)) + si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner2.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10)) + si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner2.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11)) + si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner2.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11)) oldMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si0, si1, si2}) - si3 := newSectorOnChainInfo(3, tutils.MakeCID("3", &miner0.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12)) + si3 := newSectorOnChainInfo(3, tutils.MakeCID("3", &miner2.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12)) // 0 delete // 1 extend // 2 same @@ -394,14 +359,14 @@ func TestMinerSectorChange(t *testing.T) { newMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si1Ext, si2, si3}) minerAddr := nextIDAddrF() - oldState, err := mockTipset(minerAddr, 1) + 
oldState, err := test.MockTipset(minerAddr, 1) require.NoError(t, err) - newState, err := mockTipset(minerAddr, 2) + newState, err := test.MockTipset(minerAddr, 2) require.NoError(t, err) - api := newMockAPI(bs) - api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin0.StorageMinerActorCodeID}) - api.setActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin0.StorageMinerActorCodeID}) + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID}) + api.SetActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID}) preds := NewStatePredicates(api) @@ -449,29 +414,16 @@ func TestMinerSectorChange(t *testing.T) { require.Equal(t, si1Ext, sectorChanges.Extended[0].From) } -func mockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { - return types.NewTipSet([]*types.BlockHeader{{ - Miner: minerAddr, - Height: 5, - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - Timestamp: timestamp, - }}) -} - type balance struct { available abi.TokenAmount locked abi.TokenAmount } -func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState, props map[abi.DealID]*market0.DealProposal, balances map[address.Address]balance) cid.Cid { - dealRootCid := createDealAMT(ctx, t, store, deals) +func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState, props map[abi.DealID]*market2.DealProposal, balances map[address.Address]balance) cid.Cid { + dealRootCid := test.CreateDealAMT(ctx, t, store, deals) propRootCid := createProposalAMT(ctx, t, store, props) balancesCids := createBalanceTable(ctx, t, store, balances) - state := createEmptyMarketState(t, store) + state := test.CreateEmptyMarketState(t, store) state.States = dealRootCid state.Proposals = propRootCid state.EscrowTable = balancesCids[0] @@ -482,27 +434,8 @@ func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals return stateC } -func createEmptyMarketState(t *testing.T, store adt.Store) *market0.State { - emptyArrayCid, err := adt.MakeEmptyArray(store).Root() - require.NoError(t, err) - emptyMap, err := adt.MakeEmptyMap(store).Root() - require.NoError(t, err) - return market0.ConstructState(emptyArrayCid, emptyMap, emptyMap) -} - -func createDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState) cid.Cid { - root := adt.MakeEmptyArray(store) - for dealID, dealState := range deals { - err := root.Set(uint64(dealID), dealState) - require.NoError(t, err) - } - rootCid, err := root.Root() - require.NoError(t, err) - return rootCid -} - -func createProposalAMT(ctx context.Context, t *testing.T, store adt.Store, props map[abi.DealID]*market0.DealProposal) cid.Cid { - root := adt.MakeEmptyArray(store) +func createProposalAMT(ctx context.Context, t *testing.T, store adt2.Store, props map[abi.DealID]*market2.DealProposal) cid.Cid { + root := adt2.MakeEmptyArray(store) for dealID, prop := range props { err := root.Set(uint64(dealID), prop) require.NoError(t, err) @@ -512,16 +445,16 @@ func createProposalAMT(ctx context.Context, t *testing.T, store adt.Store, props return rootCid } -func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, balances 
map[address.Address]balance) [2]cid.Cid { - escrowMapRoot := adt.MakeEmptyMap(store) +func createBalanceTable(ctx context.Context, t *testing.T, store adt2.Store, balances map[address.Address]balance) [2]cid.Cid { + escrowMapRoot := adt2.MakeEmptyMap(store) escrowMapRootCid, err := escrowMapRoot.Root() require.NoError(t, err) - escrowRoot, err := adt.AsBalanceTable(store, escrowMapRootCid) + escrowRoot, err := adt2.AsBalanceTable(store, escrowMapRootCid) require.NoError(t, err) - lockedMapRoot := adt.MakeEmptyMap(store) + lockedMapRoot := adt2.MakeEmptyMap(store) lockedMapRootCid, err := lockedMapRoot.Root() require.NoError(t, err) - lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid) + lockedRoot, err := adt2.AsBalanceTable(store, lockedMapRootCid) require.NoError(t, err) for addr, balance := range balances { @@ -538,7 +471,7 @@ func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, bala return [2]cid.Cid{escrowRootCid, lockedRootCid} } -func createMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address, sectors []miner.SectorOnChainInfo) cid.Cid { +func createMinerState(ctx context.Context, t *testing.T, store adt2.Store, owner, worker address.Address, sectors []miner.SectorOnChainInfo) cid.Cid { rootCid := createSectorsAMT(ctx, t, store, sectors) state := createEmptyMinerState(ctx, t, store, owner, worker) @@ -549,20 +482,20 @@ func createMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, return stateC } -func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address) *miner0.State { - emptyArrayCid, err := adt.MakeEmptyArray(store).Root() +func createEmptyMinerState(ctx context.Context, t *testing.T, store adt2.Store, owner, worker address.Address) *miner2.State { + emptyArrayCid, err := adt2.MakeEmptyArray(store).Root() require.NoError(t, err) - emptyMap, err := adt.MakeEmptyMap(store).Root() + emptyMap, err := adt2.MakeEmptyMap(store).Root() require.NoError(t, err) - emptyDeadline, err := store.Put(store.Context(), miner0.ConstructDeadline(emptyArrayCid)) + emptyDeadline, err := store.Put(store.Context(), miner2.ConstructDeadline(emptyArrayCid)) require.NoError(t, err) - emptyVestingFunds := miner0.ConstructVestingFunds() + emptyVestingFunds := miner2.ConstructVestingFunds() emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds) require.NoError(t, err) - emptyDeadlines := miner0.ConstructDeadlines(emptyDeadline) + emptyDeadlines := miner2.ConstructDeadlines(emptyDeadline) emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines) require.NoError(t, err) @@ -572,16 +505,30 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o emptyBitfieldCid, err := store.Put(store.Context(), emptyBitfield) require.NoError(t, err) - state, err := miner0.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid) + state, err := miner2.ConstructState(minerInfo, 123, 4, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid) require.NoError(t, err) return state } -func createSectorsAMT(ctx context.Context, t *testing.T, store adt.Store, sectors []miner.SectorOnChainInfo) cid.Cid { - root := adt.MakeEmptyArray(store) +func createSectorsAMT(ctx context.Context, t *testing.T, store adt2.Store, sectors []miner.SectorOnChainInfo) cid.Cid { + root := adt2.MakeEmptyArray(store) for _, sector := range sectors { - sector := 
(miner0.SectorOnChainInfo)(sector) + sector := miner2.SectorOnChainInfo{ + SectorNumber: sector.SectorNumber, + SealProof: sector.SealProof, + SealedCID: sector.SealedCID, + DealIDs: sector.DealIDs, + Activation: sector.Activation, + Expiration: sector.Expiration, + DealWeight: sector.DealWeight, + VerifiedDealWeight: sector.VerifiedDealWeight, + InitialPledge: sector.InitialPledge, + ExpectedDayReward: sector.ExpectedDayReward, + ExpectedStoragePledge: sector.ExpectedStoragePledge, + ReplacedSectorAge: 0, + ReplacedDayReward: big.NewInt(0), + } err := root.Set(uint64(sector.SectorNumber), §or) require.NoError(t, err) } @@ -614,8 +561,8 @@ const ( ) // returns a unique SectorPreCommitInfo with each invocation with SectorNumber set to `sectorNo`. -func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner0.SectorPreCommitInfo { - return &miner0.SectorPreCommitInfo{ +func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner2.SectorPreCommitInfo { + return &miner2.SectorPreCommitInfo{ SealProof: abi.RegisteredSealProof_StackedDrg32GiBV1, SectorNumber: sectorNo, SealedCID: sealed, @@ -625,7 +572,7 @@ func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiratio } } -func dealEquality(expected market0.DealState, actual market.DealState) bool { +func dealEquality(expected market2.DealState, actual market.DealState) bool { return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && expected.SectorStartEpoch == actual.SectorStartEpoch && expected.SlashEpoch == actual.SlashEpoch diff --git a/chain/events/utils.go b/chain/events/utils.go index e50dbc6fe..c26ca5b83 100644 --- a/chain/events/utils.go +++ b/chain/events/utils.go @@ -34,11 +34,11 @@ func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd } func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc { - return func(msg *types.Message) (matchOnce bool, matched bool, err error) { + return func(msg *types.Message) (matched bool, err error) { if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) { - return true, false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) + return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) } - return true, inmsg.Equals(msg), nil + return inmsg.Equals(msg), nil } } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 9133f14b4..d56f285a0 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -8,13 +8,11 @@ import ( "sync/atomic" "time" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/google/uuid" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/google/uuid" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" @@ -26,6 +24,8 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -50,7 +50,7 @@ const msgsPerBlock = 20 //nolint:deadcode,varcheck var log = logging.Logger("gen") -var ValidWpostForTesting = 
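createSectorsAMT can no longer turn miner.SectorOnChainInfo into the actors type with a plain struct conversion, because the v2 struct gained ReplacedSectorAge and ReplacedDayReward; Go only allows conversions between structs with identical field sets, so the fields are copied by hand and the new ones zero-filled. A minimal illustration with stand-in types:

```go
package main

import "fmt"

type v0Info struct {
	SectorNumber uint64
	Expiration   int64
}

// v2Info gained a field, so the conversion v2Info(someV0Info) no longer
// compiles and fields must be copied explicitly.
type v2Info struct {
	SectorNumber      uint64
	Expiration        int64
	ReplacedSectorAge int64
}

func toV2(in v0Info) v2Info {
	return v2Info{
		SectorNumber:      in.SectorNumber,
		Expiration:        in.Expiration,
		ReplacedSectorAge: 0, // new in v2; not tracked by the older type
	}
}

func main() {
	fmt.Println(toV2(v0Info{SectorNumber: 3, Expiration: 12})) // {3 12 0}
}
```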
[]proof.PoStProof{{ +var ValidWpostForTesting = []proof2.PoStProof{{ ProofBytes: []byte("valid proof"), }} @@ -155,14 +155,14 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { return nil, xerrors.Errorf("creating memrepo wallet failed: %w", err) } - banker, err := w.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + banker, err := w.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { return nil, xerrors.Errorf("failed to generate banker key: %w", err) } receievers := make([]address.Address, msgsPerBlock) for r := range receievers { - receievers[r], err = w.WalletNew(context.Background(), crypto.SigTypeBLS) + receievers[r], err = w.WalletNew(context.Background(), types.KTBLS) if err != nil { return nil, xerrors.Errorf("failed to generate receiver key: %w", err) } @@ -467,7 +467,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, - wpost []proof.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { + wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { var ts uint64 if cg.Timestamper != nil { @@ -605,7 +605,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) + ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) } type wppProvider struct{} @@ -614,7 +614,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) { +func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { return ValidWpostForTesting, nil } @@ -681,15 +681,15 @@ type genFakeVerifier struct{} var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) -func (m genFakeVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { return true, nil } -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 1023e5efa..be83a8711 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -24,11 +24,12 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 
"github.com/filecoin-project/specs-actors/actors/builtin/power" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/runtime" + runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -47,7 +48,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime.Syscalls + runtime2.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -55,9 +56,9 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls { return &fakedSigSyscalls{ - base(ctx, cstate, cst), + base(ctx, rt), } } } @@ -114,7 +115,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid } params := mustEnc(constructorParams) - rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin.MethodsPower.CreateMiner, params) + rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin0.MethodsPower.CreateMiner, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) } @@ -146,7 +147,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid if m.MarketBalance.GreaterThan(big.Zero()) { params := mustEnc(&minerInfos[i].maddr) - _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params) + _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin0.MethodsMarket.AddBalance, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err) } @@ -158,7 +159,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid publish := func(params *market.PublishStorageDealsParams) error { fmt.Printf("publishing %d storage deals on miner %s with worker %s\n", len(params.Deals), params.Deals[0].Proposal.Provider, m.Worker) - ret, err := doExecValue(ctx, vm, market.Address, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params)) + ret, err := doExecValue(ctx, vm, market.Address, m.Worker, big.Zero(), builtin0.MethodsMarket.PublishStorageDeals, mustEnc(params)) if err != nil { return xerrors.Errorf("failed to create genesis miner (publish deals): %w", err) } @@ -290,17 +291,17 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid pledge = big.Add(pcd, pledge) fmt.Println(types.FIL(pledge)) - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin0.MethodsMiner.PreCommitSector, mustEnc(params)) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } // Commit one-by-one, otherwise pledge math tends to explode - confirmParams := &builtin.ConfirmSectorProofsParams{ + confirmParams := &builtin0.ConfirmSectorProofsParams{ Sectors: []abi.SectorNumber{preseal.SectorID}, } - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), 
builtin.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin0.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams)) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } @@ -349,7 +350,7 @@ func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization cry } func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) { - pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin.MethodsPower.CurrentTotalPower, nil) + pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil) if err != nil { return nil, err } @@ -373,7 +374,7 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [ market.Address, maddr, abi.NewTokenAmount(0), - builtin.MethodsMarket.VerifyDealsForActivation, + builtin0.MethodsMarket.VerifyDealsForActivation, mustEnc(params), ) if err != nil { @@ -387,7 +388,7 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [ } func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward0.ThisEpochRewardReturn, error) { - rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil) + rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin0.MethodsReward.ThisEpochReward, nil) if err != nil { return nil, err } @@ -405,7 +406,7 @@ func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.Toke rt := unsafeVM.MakeRuntime(ctx, &types.Message{ GasLimit: 1_000_000_000, From: maddr, - }, maddr, 0, 0, 0) + }) return rt.TotalFilCircSupply() } diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go index bcafb007e..54cc30cc1 100644 --- a/chain/gen/genesis/util.go +++ b/chain/gen/genesis/util.go @@ -50,12 +50,27 @@ func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value return ret.Return, nil } -var GenesisNetworkVersion = func() network.Version { // TODO: Get from build/ - if build.UseNewestNetwork() { // TODO: Get from build/ - return build.NewestNetworkVersion // TODO: Get from build/ - } // TODO: Get from build/ - return network.Version1 // TODO: Get from build/ -}() // TODO: Get from build/ +// TODO: Get from build +// TODO: make a list/schedule of these. +var GenesisNetworkVersion = func() network.Version { + // returns the version _before_ the first upgrade. + if build.UpgradeBreezeHeight >= 0 { + return network.Version0 + } + if build.UpgradeSmokeHeight >= 0 { + return network.Version1 + } + if build.UpgradeIgnitionHeight >= 0 { + return network.Version2 + } + if build.UpgradeActorsV2Height >= 0 { + return network.Version3 + } + if build.UpgradeLiftoffHeight >= 0 { + return network.Version3 + } + return build.ActorUpgradeNetworkVersion - 1 // genesis requires actors v0. 
+}() func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/ return GenesisNetworkVersion // TODO: Get from build/ diff --git a/chain/gen/mining.go b/chain/gen/mining.go index 45a089452..cca4b6169 100644 --- a/chain/gen/mining.go +++ b/chain/gen/mining.go @@ -4,17 +4,14 @@ import ( "context" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/specs-actors/actors/util/adt" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/lib/sigs/bls" ) @@ -30,7 +27,12 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA return nil, xerrors.Errorf("failed to load tipset state: %w", err) } - worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, st, bt.Miner) + _, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch) + if err != nil { + return nil, xerrors.Errorf("getting lookback miner actor state: %w", err) + } + + worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, lbst, bt.Miner) if err != nil { return nil, xerrors.Errorf("failed to get miner worker: %w", err) } @@ -114,23 +116,12 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA } next.ParentBaseFee = baseFee - cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) - tree, err := state.LoadStateTree(cst, st) - if err != nil { - return nil, xerrors.Errorf("failed to load state tree: %w", err) - } - - waddr, err := vm.ResolveToKeyAddr(tree, cst, worker) - if err != nil { - return nil, xerrors.Errorf("failed to resolve miner address to key address: %w", err) - } - nosigbytes, err := next.SigningBytes() if err != nil { return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err) } - sig, err := w.WalletSign(ctx, waddr, nosigbytes, api.MsgMeta{ + sig, err := w.WalletSign(ctx, worker, nosigbytes, api.MsgMeta{ Type: api.MTBlock, }) if err != nil { @@ -181,8 +172,8 @@ func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { }, nil } -func toArray(store adt.Store, cids []cid.Cid) (cid.Cid, error) { - arr := adt.MakeEmptyArray(store) +func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) { + arr := blockadt.MakeEmptyArray(store) for i, c := range cids { oc := cbg.CborCid(c) if err := arr.Set(uint64(i), &oc); err != nil { diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go index 8b5f85a12..50467a6e1 100644 --- a/chain/market/fundmgr.go +++ b/chain/market/fundmgr.go @@ -4,15 +4,13 @@ import ( "context" "sync" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "go.uber.org/fx" - - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" + "go.uber.org/fx" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -156,7 +154,7 @@ func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Add To: market.Address, From: wallet, Value: toAdd, - 
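A standalone sketch of the genesis network version selection introduced in chain/gen/genesis/util.go above: the version returned is the one in force before the first enabled upgrade. The height constants here are hypothetical stand-ins for the build.Upgrade*Height values, not the real schedule:

```go
package main

import "fmt"

// Hypothetical stand-ins for build.Upgrade*Height; a negative height means
// the upgrade is disabled, mirroring the checks in the patch.
const (
	upgradeBreezeHeight   = -1 // disabled in this made-up config
	upgradeSmokeHeight    = 51000
	upgradeIgnitionHeight = 94000
	upgradeActorsV2Height = 138720
)

type version int

// genesisVersion returns the network version in force *before* the first
// enabled upgrade, which is the version the genesis state is built with.
func genesisVersion() version {
	switch {
	case upgradeBreezeHeight >= 0:
		return 0
	case upgradeSmokeHeight >= 0:
		return 1
	case upgradeIgnitionHeight >= 0:
		return 2
	case upgradeActorsV2Height >= 0:
		return 3
	default:
		return 3 // fall back to the last pre-upgrade version
	}
}

func main() {
	fmt.Println("genesis network version:", genesisVersion()) // prints 1 here
}
```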
Method: builtin.MethodsMarket.AddBalance, + Method: market.Methods.AddBalance, Params: params, }, nil) if err != nil { diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go index f5936f73d..88ca2e16f 100644 --- a/chain/market/fundmgr_test.go +++ b/chain/market/fundmgr_test.go @@ -12,8 +12,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/specs-actors/actors/builtin" - tutils "github.com/filecoin-project/specs-actors/support/testing" + + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" @@ -51,7 +51,7 @@ func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Add To: market.Address, From: wallet, Value: toAdd, - Method: builtin.MethodsMarket.AddBalance, + Method: market.Methods.AddBalance, Params: params, } } diff --git a/chain/messagepool/gasguess/guessgas.go b/chain/messagepool/gasguess/guessgas.go index 607c7824a..76ab39078 100644 --- a/chain/messagepool/gasguess/guessgas.go +++ b/chain/messagepool/gasguess/guessgas.go @@ -6,6 +6,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/go-address" @@ -68,7 +69,7 @@ func failedGuess(msg *types.SignedMessage) int64 { func GuessGasUsed(ctx context.Context, tsk types.TipSetKey, msg *types.SignedMessage, al ActorLookup) (int64, error) { // MethodSend is the same in all versions. - if msg.Message.Method == builtin0.MethodSend { + if msg.Message.Method == builtin.MethodSend { switch msg.Message.From.Protocol() { case address.BLS: return 1298450, nil diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index d3c638b22..8c8a8af15 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -181,9 +181,15 @@ func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { return types.BigAdd(minPrice, types.NewInt(1)) } -func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) { +func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, maxFee abi.TokenAmount) { if maxFee.Equals(big.Zero()) { - maxFee = types.NewInt(build.FilecoinPrecision / 10) + mf, err := mff() + if err != nil { + log.Errorf("failed to get default max gas fee: %+v", err) + mf = big.Zero() + } + + maxFee = mf } gl := types.NewInt(uint64(msg.GasLimit)) @@ -368,11 +374,23 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ return err }) - if err := mp.loadLocal(); err != nil { - log.Errorf("loading local messages: %+v", err) - } + mp.curTsLk.Lock() + mp.lk.Lock() - go mp.runLoop() + go func() { + err := mp.loadLocal() + + mp.lk.Unlock() + mp.curTsLk.Unlock() + + if err != nil { + log.Errorf("loading local messages: %+v", err) + } + + log.Info("mpool ready") + + mp.runLoop() + }() return mp, nil } @@ -418,9 +436,14 @@ func (mp *MessagePool) runLoop() { } } -func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error { +func (mp *MessagePool) addLocal(m *types.SignedMessage) error { mp.localAddrs[m.Message.From] = struct{}{} + msgb, err := m.Serialize() + if err != nil { + return xerrors.Errorf("error serializing message: %w", err) + } + if err := mp.localMsgs.Put(datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { return xerrors.Errorf("persisting 
local message: %w", err) } @@ -493,11 +516,6 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { <-mp.addSema }() - msgb, err := m.Serialize() - if err != nil { - return cid.Undef, err - } - mp.curTsLk.Lock() publish, err := mp.addTs(m, mp.curTs, true, false) if err != nil { @@ -506,18 +524,19 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { } mp.curTsLk.Unlock() - mp.lk.Lock() - if err := mp.addLocal(m, msgb); err != nil { - mp.lk.Unlock() - return cid.Undef, err - } - mp.lk.Unlock() - if publish { + msgb, err := m.Serialize() + if err != nil { + return cid.Undef, xerrors.Errorf("error serializing message: %w", err) + } + err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) + if err != nil { + return cid.Undef, xerrors.Errorf("error publishing message: %w", err) + } } - return m.Cid(), err + return m.Cid(), nil } func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { @@ -656,7 +675,19 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, return false, err } - return publish, mp.addLocked(m, !local, untrusted) + err = mp.addLocked(m, !local, untrusted) + if err != nil { + return false, err + } + + if local { + err = mp.addLocal(m) + if err != nil { + return false, xerrors.Errorf("error persisting local message: %w", err) + } + } + + return publish, nil } func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { @@ -665,11 +696,12 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { return err } - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - curTs := mp.curTs + if curTs == nil { + return xerrors.Errorf("current tipset not loaded") + } + snonce, err := mp.getStateNonce(m.Message.From, curTs) if err != nil { return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) @@ -679,9 +711,6 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow) } - mp.lk.Lock() - defer mp.lk.Unlock() - _, err = mp.verifyMsgBeforeAdd(m, curTs, true) if err != nil { return err @@ -825,11 +854,6 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { <-mp.addSema }() - msgb, err := m.Serialize() - if err != nil { - return cid.Undef, err - } - mp.curTsLk.Lock() publish, err := mp.addTs(m, mp.curTs, false, true) if err != nil { @@ -838,18 +862,19 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { } mp.curTsLk.Unlock() - mp.lk.Lock() - if err := mp.addLocal(m, msgb); err != nil { - mp.lk.Unlock() - return cid.Undef, err - } - mp.lk.Unlock() - if publish { + msgb, err := m.Serialize() + if err != nil { + return cid.Undef, xerrors.Errorf("error serializing message: %w", err) + } + err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) + if err != nil { + return cid.Undef, xerrors.Errorf("error publishing message: %w", err) + } } - return m.Cid(), err + return m.Cid(), nil } func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 063e0bbab..e31df936c 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -8,17 +8,18 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + 
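A simplified, self-contained sketch of the CapGasFee change above, where the hard-coded default cap is replaced by a configurable dtypes.DefaultMaxFeeFunc. Only the fallback-to-default step is fully visible in this hunk, so the clamping at the end of this function is an approximation, and the types are plain big.Int stand-ins:

```go
package main

import (
	"fmt"
	"math/big"
)

// defaultMaxFeeFunc mimics the dtypes.DefaultMaxFeeFunc introduced in the
// patch: the default max fee now comes from node configuration rather than
// a hard-coded constant.
type defaultMaxFeeFunc func() (*big.Int, error)

// capGasFee falls back to the configured default when the caller passes a
// zero maxFee, then keeps GasFeeCap*GasLimit within that cap. This is a
// simplified stand-in for messagepool.CapGasFee, not the exact function.
func capGasFee(mff defaultMaxFeeFunc, gasFeeCap *big.Int, gasLimit int64, maxFee *big.Int) *big.Int {
	if maxFee.Sign() == 0 {
		mf, err := mff()
		if err != nil {
			mf = big.NewInt(0) // same fallback as the patch: log and use zero
		}
		maxFee = mf
	}
	total := new(big.Int).Mul(gasFeeCap, big.NewInt(gasLimit))
	if total.Cmp(maxFee) <= 0 {
		return gasFeeCap
	}
	return new(big.Int).Div(maxFee, big.NewInt(gasLimit))
}

func main() {
	mff := func() (*big.Int, error) { return big.NewInt(1_000_000), nil }
	fmt.Println(capGasFee(mff, big.NewInt(100), 5_000, big.NewInt(0)))
}
```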
logging "github.com/ipfs/go-log/v2" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/wallet" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log/v2" ) func init() { @@ -143,7 +144,7 @@ func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) ( } return &types.Actor{ - Code: builtin.StorageMarketActorCodeID, + Code: builtin2.StorageMarketActorCodeID, Nonce: nonce, Balance: balance, }, nil @@ -232,7 +233,7 @@ func TestMessagePool(t *testing.T) { a := tma.nextBlock() - sender, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + sender, err := w.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -273,7 +274,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { a := tma.nextBlock() - sender, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + sender, err := w.WalletNew(context.Background(), types.KTBLS) if err != nil { t.Fatal(err) } @@ -323,7 +324,7 @@ func TestRevertMessages(t *testing.T) { a := tma.nextBlock() b := tma.nextBlock() - sender, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + sender, err := w.WalletNew(context.Background(), types.KTBLS) if err != nil { t.Fatal(err) } @@ -386,7 +387,7 @@ func TestPruningSimple(t *testing.T) { a := tma.nextBlock() tma.applyBlock(t, a) - sender, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + sender, err := w.WalletNew(context.Background(), types.KTBLS) if err != nil { t.Fatal(err) } @@ -433,7 +434,7 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -443,14 +444,14 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] msgs := make(map[cid.Cid]struct{}) for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) @@ -505,7 +506,7 @@ func TestClearAll(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -515,14 +516,14 @@ func TestClearAll(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, 
uint64(i), gasLimit, uint64(i+1)) _, err := mp.Push(m) @@ -559,7 +560,7 @@ func TestClearNonLocal(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -569,7 +570,7 @@ func TestClearNonLocal(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -577,7 +578,7 @@ func TestClearNonLocal(t *testing.T) { tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) _, err := mp.Push(m) @@ -620,7 +621,7 @@ func TestUpdates(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -630,7 +631,7 @@ func TestUpdates(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -643,7 +644,7 @@ func TestUpdates(t *testing.T) { t.Fatal(err) } - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index 3e1252eec..8da64f974 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -5,11 +5,13 @@ import ( "testing" "time" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/chain/messagepool/gasguess" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-datastore" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" ) func TestRepubMessages(t *testing.T) { @@ -33,7 +35,7 @@ func TestRepubMessages(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -43,12 +45,12 @@ func TestRepubMessages(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 2ddbed0ad..5a8200bf8 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" 
tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" @@ -751,11 +752,10 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 balance = new(big.Int).Sub(balance, required) value := m.Message.Value.Int - if balance.Cmp(value) >= 0 { - // Note: we only account for the value if the balance doesn't drop below 0 - // otherwise the message will fail and the miner can reap the gas rewards - balance = new(big.Int).Sub(balance, value) + if balance.Cmp(value) < 0 { + break } + balance = new(big.Int).Sub(balance, value) gasReward := mp.getGasReward(m, baseFee) rewards = append(rewards, gasReward) diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 9e4fe39e5..08cf286c8 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -13,18 +13,18 @@ import ( "sort" "testing" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/crypto" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/lotus/api" _ "github.com/filecoin-project/lotus/lib/sigs/bls" @@ -77,7 +77,7 @@ func TestMessageChains(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -87,7 +87,7 @@ func TestMessageChains(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -95,7 +95,7 @@ func TestMessageChains(t *testing.T) { block := tma.nextBlock() ts := mock.TipSet(block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL @@ -315,7 +315,7 @@ func TestMessageChainSkipping(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -325,7 +325,7 @@ func TestMessageChainSkipping(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -333,7 +333,7 @@ func TestMessageChainSkipping(t *testing.T) { block := tma.nextBlock() ts := mock.TipSet(block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] baseFee := types.NewInt(0) tma.setBalance(a1, 1) // in FIL @@ -391,7 +391,7 @@ func TestBasicMessageSelection(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), 
crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -401,7 +401,7 @@ func TestBasicMessageSelection(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -410,7 +410,7 @@ func TestBasicMessageSelection(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -535,7 +535,7 @@ func TestMessageSelectionTrimming(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -545,7 +545,7 @@ func TestMessageSelectionTrimming(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -554,7 +554,7 @@ func TestMessageSelectionTrimming(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -598,7 +598,7 @@ func TestPriorityMessageSelection(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -608,7 +608,7 @@ func TestPriorityMessageSelection(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -617,7 +617,7 @@ func TestPriorityMessageSelection(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -677,7 +677,7 @@ func TestPriorityMessageSelection2(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -687,7 +687,7 @@ func TestPriorityMessageSelection2(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -696,7 +696,7 @@ func TestPriorityMessageSelection2(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -746,7 +746,7 @@ func TestPriorityMessageSelection3(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) 
+ a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -756,7 +756,7 @@ func TestPriorityMessageSelection3(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -765,7 +765,7 @@ func TestPriorityMessageSelection3(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -843,7 +843,7 @@ func TestOptimalMessageSelection1(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -853,7 +853,7 @@ func TestOptimalMessageSelection1(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -862,7 +862,7 @@ func TestOptimalMessageSelection1(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -910,7 +910,7 @@ func TestOptimalMessageSelection2(t *testing.T) { t.Fatal(err) } - a1, err := w1.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -920,7 +920,7 @@ func TestOptimalMessageSelection2(t *testing.T) { t.Fatal(err) } - a2, err := w2.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a2, err := w2.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -929,7 +929,7 @@ func TestOptimalMessageSelection2(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL @@ -994,7 +994,7 @@ func TestOptimalMessageSelection3(t *testing.T) { t.Fatal(err) } - a, err := w.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a, err := w.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -1007,7 +1007,7 @@ func TestOptimalMessageSelection3(t *testing.T) { ts := mock.TipSet(block) tma.applyBlock(t, block) - gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for _, a := range actors { tma.setBalance(a, 1) // in FIL @@ -1074,7 +1074,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu t.Fatal(err) } - a, err := w.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a, err := w.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } @@ -1087,7 +1087,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu ts := mock.TipSet(block) tma.applyBlock(t, block) - 
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] baseFee := types.NewInt(0) for _, a := range actors { @@ -1344,7 +1344,7 @@ readLoop: t.Fatal(err) } - a, err := w.WalletNew(context.Background(), crypto.SigTypeSecp256k1) + a, err := w.WalletNew(context.Background(), types.KTSecp256k1) if err != nil { t.Fatal(err) } diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index 0dafce9a1..5eebd36da 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/go-state-types/crypto" "github.com/stretchr/testify/require" ds_sync "github.com/ipfs/go-datastore/sync" @@ -47,13 +46,13 @@ func TestMessageSignerSignMessage(t *testing.T) { ctx := context.Background() w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) - from1, err := w.WalletNew(ctx, crypto.SigTypeSecp256k1) + from1, err := w.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) - from2, err := w.WalletNew(ctx, crypto.SigTypeSecp256k1) + from2, err := w.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) - to1, err := w.WalletNew(ctx, crypto.SigTypeSecp256k1) + to1, err := w.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) - to2, err := w.WalletNew(ctx, crypto.SigTypeSecp256k1) + to2, err := w.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) type msgSpec struct { diff --git a/chain/state/statetree.go b/chain/state/statetree.go index e9b76ea77..7fa55b31c 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -26,16 +26,18 @@ var log = logging.Logger("statetree") // StateTree stores actors state by their ID. 
type StateTree struct { - root adt.Map - version types.StateTreeVersion - info cid.Cid - Store cbor.IpldStore + root adt.Map + version types.StateTreeVersion + info cid.Cid + Store cbor.IpldStore + lookupIDFun func(address.Address) (address.Address, error) snaps *stateSnaps } type stateSnaps struct { - layers []*stateSnapLayer + layers []*stateSnapLayer + lastMaybeNonEmptyResolveCache int } type stateSnapLayer struct { @@ -67,7 +69,12 @@ func (ss *stateSnaps) addLayer() { func (ss *stateSnaps) dropLayer() { ss.layers[len(ss.layers)-1] = nil // allow it to be GCed + ss.layers = ss.layers[:len(ss.layers)-1] + + if ss.lastMaybeNonEmptyResolveCache == len(ss.layers) { + ss.lastMaybeNonEmptyResolveCache = len(ss.layers) - 1 + } } func (ss *stateSnaps) mergeLastLayer() { @@ -86,7 +93,13 @@ func (ss *stateSnaps) mergeLastLayer() { } func (ss *stateSnaps) resolveAddress(addr address.Address) (address.Address, bool) { - for i := len(ss.layers) - 1; i >= 0; i-- { + for i := ss.lastMaybeNonEmptyResolveCache; i >= 0; i-- { + if len(ss.layers[i].resolveCache) == 0 { + if ss.lastMaybeNonEmptyResolveCache == i { + ss.lastMaybeNonEmptyResolveCache = i - 1 + } + continue + } resa, ok := ss.layers[i].resolveCache[addr] if ok { return resa, true @@ -97,6 +110,7 @@ func (ss *stateSnaps) resolveAddress(addr address.Address) (address.Address, boo func (ss *stateSnaps) cacheResolveAddress(addr, resa address.Address) { ss.layers[len(ss.layers)-1].resolveCache[addr] = resa + ss.lastMaybeNonEmptyResolveCache = len(ss.layers) - 1 } func (ss *stateSnaps) getActor(addr address.Address) (*types.Actor, error) { @@ -160,13 +174,15 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e return nil, err } - return &StateTree{ + s := &StateTree{ root: root, info: info, version: ver, Store: cst, snaps: newStateSnaps(), - }, nil + } + s.lookupIDFun = s.lookupIDinternal + return s, nil } func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { @@ -190,13 +206,15 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { return nil, err } - return &StateTree{ + s := &StateTree{ root: nd, info: root.Info, version: root.Version, Store: cst, snaps: newStateSnaps(), - }, nil + } + s.lookupIDFun = s.lookupIDinternal + return s, nil default: return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) } @@ -213,17 +231,7 @@ func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error { return nil } -// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`. -func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { - if addr.Protocol() == address.ID { - return addr, nil - } - - resa, ok := st.snaps.resolveAddress(addr) - if ok { - return resa, nil - } - +func (st *StateTree) lookupIDinternal(addr address.Address) (address.Address, error) { act, err := st.GetActor(init_.Address) if err != nil { return address.Undef, xerrors.Errorf("getting init actor: %w", err) @@ -241,6 +249,23 @@ func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { if err != nil { return address.Undef, xerrors.Errorf("resolve address %s: %w", addr, err) } + return a, err +} + +// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`. 
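A trimmed-down sketch of the resolve-cache bookkeeping added to stateSnaps above: the index of the highest possibly non-empty resolveCache layer is tracked so address lookups can skip the empty snapshot layers pushed by nested calls. Lotus types are replaced with plain maps and strings here:

```go
package main

import "fmt"

// layer stands in for stateSnapLayer; only the resolveCache matters here.
type layer struct {
	resolveCache map[string]string
}

// snaps is a trimmed-down stateSnaps: it remembers the highest layer whose
// resolveCache may be non-empty so resolve can skip empty layers cheaply.
type snaps struct {
	layers                        []*layer
	lastMaybeNonEmptyResolveCache int
}

func (ss *snaps) addLayer() {
	ss.layers = append(ss.layers, &layer{resolveCache: map[string]string{}})
}

func (ss *snaps) cacheResolve(addr, id string) {
	ss.layers[len(ss.layers)-1].resolveCache[addr] = id
	ss.lastMaybeNonEmptyResolveCache = len(ss.layers) - 1
}

func (ss *snaps) resolve(addr string) (string, bool) {
	for i := ss.lastMaybeNonEmptyResolveCache; i >= 0; i-- {
		if len(ss.layers[i].resolveCache) == 0 {
			if ss.lastMaybeNonEmptyResolveCache == i {
				ss.lastMaybeNonEmptyResolveCache = i - 1
			}
			continue
		}
		if id, ok := ss.layers[i].resolveCache[addr]; ok {
			return id, true
		}
	}
	return "", false
}

func main() {
	ss := &snaps{}
	ss.addLayer()
	ss.cacheResolve("t1abc", "t01000")
	ss.addLayer() // empty layer added by a nested call; skipped on lookup
	id, ok := ss.resolve("t1abc")
	fmt.Println(id, ok)
}
```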
+func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { + if addr.Protocol() == address.ID { + return addr, nil + } + + resa, ok := st.snaps.resolveAddress(addr) + if ok { + return resa, nil + } + a, err := st.lookupIDFun(addr) + if err != nil { + return a, err + } st.snaps.cacheResolveAddress(addr, a) diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index ed1fb1889..91674337b 100644 --- a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -10,7 +10,7 @@ import ( address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" @@ -18,7 +18,7 @@ import ( func BenchmarkStateTreeSet(b *testing.B) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + st, err := NewStateTree(cst, types.StateTreeVersion1) if err != nil { b.Fatal(err) } @@ -33,8 +33,8 @@ func BenchmarkStateTreeSet(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, + Code: builtin2.StorageMinerActorCodeID, + Head: builtin2.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { @@ -60,8 +60,8 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, + Code: builtin2.StorageMinerActorCodeID, + Head: builtin2.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { @@ -73,6 +73,103 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { } } +func TestResolveCache(t *testing.T) { + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + if err != nil { + t.Fatal(err) + } + nonId := address.NewForTestGetter()() + id, _ := address.NewIDAddress(1000) + + st.lookupIDFun = func(a address.Address) (address.Address, error) { + if a == nonId { + return id, nil + } + return address.Undef, types.ErrActorNotFound + } + + err = st.SetActor(nonId, &types.Actor{Nonce: 1}) + if err != nil { + t.Fatal(err) + } + + { + err = st.Snapshot(context.TODO()) + if err != nil { + t.Fatal(err) + } + act, err := st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 1 { + t.Fatalf("expected nonce 1, got %d", act.Nonce) + } + err = st.SetActor(nonId, &types.Actor{Nonce: 2}) + if err != nil { + t.Fatal(err) + } + + act, err = st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 2 { + t.Fatalf("expected nonce 2, got %d", act.Nonce) + } + + if err := st.Revert(); err != nil { + t.Fatal(err) + } + st.ClearSnapshot() + } + + act, err := st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 1 { + t.Fatalf("expected nonce 1, got %d", act.Nonce) + } + + { + err = st.Snapshot(context.TODO()) + if err != nil { + t.Fatal(err) + } + act, err := st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 1 { + t.Fatalf("expected nonce 1, got %d", act.Nonce) + } + err = st.SetActor(nonId, &types.Actor{Nonce: 2}) + if err != nil { + t.Fatal(err) + } + + act, err = st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 2 { + t.Fatalf("expected nonce 2, got %d", act.Nonce) + } + st.ClearSnapshot() + } + + 
act, err = st.GetActor(nonId) + if err != nil { + t.Fatal(err) + } + if act.Nonce != 2 { + t.Fatalf("expected nonce 2, got %d", act.Nonce) + } + +} + func BenchmarkStateTree10kGetActor(b *testing.B) { cst := cbor.NewMemCborStore() st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) @@ -86,8 +183,8 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523 + uint64(i)), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, + Code: builtin2.StorageMinerActorCodeID, + Head: builtin2.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { @@ -129,8 +226,8 @@ func TestSetCache(t *testing.T) { act := &types.Actor{ Balance: types.NewInt(0), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, + Code: builtin2.StorageMinerActorCodeID, + Head: builtin2.AccountActorCodeID, Nonce: 0, } @@ -173,7 +270,7 @@ func TestSnapshots(t *testing.T) { t.Fatal(err) } - if err := st.SetActor(addrs[0], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(55)}); err != nil { + if err := st.SetActor(addrs[0], &types.Actor{Code: builtin2.AccountActorCodeID, Head: builtin2.AccountActorCodeID, Balance: types.NewInt(55)}); err != nil { t.Fatal(err) } @@ -182,7 +279,7 @@ func TestSnapshots(t *testing.T) { t.Fatal(err) } - if err := st.SetActor(addrs[1], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(77)}); err != nil { + if err := st.SetActor(addrs[1], &types.Actor{Code: builtin2.AccountActorCodeID, Head: builtin2.AccountActorCodeID, Balance: types.NewInt(77)}); err != nil { t.Fatal(err) } @@ -193,7 +290,7 @@ func TestSnapshots(t *testing.T) { } // more operations in top level call... - if err := st.SetActor(addrs[2], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(123)}); err != nil { + if err := st.SetActor(addrs[2], &types.Actor{Code: builtin2.AccountActorCodeID, Head: builtin2.AccountActorCodeID, Balance: types.NewInt(123)}); err != nil { t.Fatal(err) } @@ -202,7 +299,7 @@ func TestSnapshots(t *testing.T) { t.Fatal(err) } - if err := st.SetActor(addrs[3], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(5)}); err != nil { + if err := st.SetActor(addrs[3], &types.Actor{Code: builtin2.AccountActorCodeID, Head: builtin2.AccountActorCodeID, Balance: types.NewInt(5)}); err != nil { t.Fatal(err) } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index df3bfa357..bb0f0e5ec 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -61,9 +61,10 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. Rand: store.NewChainRand(sm.cs, ts.Cids()), Bstore: sm.cs.Blockstore(), Syscalls: sm.cs.VMSys(), - CircSupplyCalc: sm.GetCirculatingSupply, + CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, BaseFee: types.NewInt(0), + LookbackState: LookbackStateGetterForTipset(sm, ts), } vmi, err := sm.newVM(ctx, vmopt) @@ -113,6 +114,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. 
} return &api.InvocResult{ + MsgCid: msg.Cid(), Msg: msg, MsgRct: &ret.MessageReceipt, ExecutionTrace: ret.ExecutionTrace, @@ -174,9 +176,10 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri Rand: r, Bstore: sm.cs.Blockstore(), Syscalls: sm.cs.VMSys(), - CircSupplyCalc: sm.GetCirculatingSupply, + CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, BaseFee: ts.Blocks()[0].ParentBaseFee, + LookbackState: LookbackStateGetterForTipset(sm, ts), } vmi, err := sm.newVM(ctx, vmopt) if err != nil { @@ -228,8 +231,10 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri } return &api.InvocResult{ + MsgCid: msg.Cid(), Msg: msg, MsgRct: &ret.MessageReceipt, + GasCost: MakeMsgGasCost(msg, ret), ExecutionTrace: ret.ExecutionTrace, Error: errs, Duration: ret.Duration, diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index fba92ee3f..e089a1084 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -79,10 +79,17 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Network: network.Version4, Expensive: true, Migration: UpgradeActorsV2, + }, { + Height: build.UpgradeTapeHeight, + Network: network.Version5, }, { Height: build.UpgradeLiftoffHeight, - Network: network.Version4, + Network: network.Version5, Migration: UpgradeLiftoff, + }, { + Height: build.UpgradeKumquatHeight, + Network: network.Version6, + Migration: nil, }} if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade @@ -165,7 +172,7 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp return ok } -func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error { +func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error { fromAct, err := tree.GetActor(from) if err != nil { return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err) @@ -198,7 +205,6 @@ func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, From: from, To: to, Value: amt, - Nonce: math.MaxUint64, } fakeRct := &types.MessageReceipt{ ExitCode: 0, @@ -206,22 +212,14 @@ func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, GasUsed: 0, } - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ - MessageReceipt: *fakeRct, - ActorErr: nil, - ExecutionTrace: types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: fakeRct, - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: nil, - }, - Duration: 0, - GasCosts: vm.ZeroGasOutputs(), - }); err != nil { - return xerrors.Errorf("recording transfer: %w", err) - } + cb(types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: fakeRct, + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: nil, + }) } return nil @@ -262,11 +260,6 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err) } - ReserveAddress, err := address.NewFromString("t090") - if err != nil { - return cid.Undef, xerrors.Errorf("failed to parse reserve address: %w", err) - } - tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) @@ -279,6 +272,10 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal } var transfers []transfer + subcalls := make([]types.ExecutionTrace, 0) + transferCb := func(trace types.ExecutionTrace) { + subcalls = append(subcalls, trace) + } // Take all excess 
funds away, put them into the reserve account err = tree.ForEach(func(addr address.Address, act *types.Actor) error { @@ -292,7 +289,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal if !sysAcc { transfers = append(transfers, transfer{ From: addr, - To: ReserveAddress, + To: builtin.ReserveAddress, Amt: act.Balance, }) } @@ -314,11 +311,13 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal available = st.GetAvailableBalance(act.Balance) } - transfers = append(transfers, transfer{ - From: addr, - To: ReserveAddress, - Amt: available, - }) + if !available.IsZero() { + transfers = append(transfers, transfer{ + From: addr, + To: builtin.ReserveAddress, + Amt: available, + }) + } } return nil }) @@ -328,7 +327,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal // Execute transfers from previous step for _, t := range transfers { - if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil { return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) } } @@ -367,7 +366,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal nbalance := big.Min(prevBalance, AccountCap) if nbalance.Sign() != 0 { transfersBack = append(transfersBack, transfer{ - From: ReserveAddress, + From: builtin.ReserveAddress, To: addr, Amt: nbalance, }) @@ -394,7 +393,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal mfunds := minerFundsAlloc(power, totalPower) transfersBack = append(transfersBack, transfer{ - From: ReserveAddress, + From: builtin.ReserveAddress, To: minfo.Worker, Amt: mfunds, }) @@ -414,7 +413,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal if lbsectors.Length() > 0 { transfersBack = append(transfersBack, transfer{ - From: ReserveAddress, + From: builtin.ReserveAddress, To: minfo.Worker, Amt: BaseMinerBalance, }) @@ -431,7 +430,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal } for _, t := range transfersBack { - if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil { return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) } } @@ -441,7 +440,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal if err != nil { return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err) } - if err := doTransfer(cb, tree, builtin0.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil { + if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil { return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err) } @@ -457,7 +456,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal } difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance) - if err := doTransfer(cb, tree, ReserveAddress, reimbAddr, difference); err != nil { + if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil { return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err) } @@ -476,6 +475,39 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return cid.Undef, xerrors.Errorf("resultant state tree account balance was 
not correct: %s", total) } + if cb != nil { + // record the transfer in execution traces + + fakeMsg := &types.Message{ + From: builtin.SystemActorAddr, + To: builtin.SystemActorAddr, + Value: big.Zero(), + Nonce: uint64(epoch), + } + fakeRct := &types.MessageReceipt{ + ExitCode: 0, + Return: nil, + GasUsed: 0, + } + + if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *fakeRct, + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: fakeRct, + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: subcalls, + }, + Duration: 0, + GasCosts: nil, + }); err != nil { + return cid.Undef, xerrors.Errorf("recording transfers: %w", err) + } + } + return tree.Flush(ctx) } @@ -511,17 +543,17 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo return cid.Undef, xerrors.Errorf("second split address: %w", err) } - err = resetGenesisMsigs(ctx, sm, store, tree, build.UpgradeLiftoffHeight) + err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight) if err != nil { return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) } - err = splitGenesisMultisig(ctx, cb, split1, store, tree, 50) + err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch) if err != nil { return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) } - err = splitGenesisMultisig(ctx, cb, split2, store, tree, 50) + err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch) if err != nil { return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) } @@ -542,22 +574,17 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root return cid.Undef, xerrors.Errorf("getting state tree: %w", err) } - addr, err := address.NewFromString("t0122") - if err != nil { - return cid.Undef, xerrors.Errorf("getting address: %w", err) - } - - err = resetMultisigVesting(ctx, store, tree, addr, 0, 0, big.Zero()) + err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero()) if err != nil { return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) } - err = resetMultisigVesting(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero()) + err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero()) if err != nil { return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) } - err = resetMultisigVesting(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero()) + err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero()) if err != nil { return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err) } @@ -652,7 +679,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, return nil } -func splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64) error { +func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error { if portions < 1 { return xerrors.Errorf("cannot split into 0 portions") } @@ -721,6 +748,11 @@ func splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Add } i := uint64(0) + subcalls := make([]types.ExecutionTrace, 0, portions) + transferCb := func(trace types.ExecutionTrace) { + subcalls = append(subcalls, trace) + } + for i < portions { keyAddr, err := makeKeyAddr(addr, i) if err != nil { @@ -737,13 +769,46 @@ func 
splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Add return xerrors.Errorf("setting new msig actor state: %w", err) } - if err := doTransfer(cb, tree, addr, idAddr, newIbal); err != nil { + if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil { return xerrors.Errorf("transferring split msig balance: %w", err) } i++ } + if cb != nil { + // record the transfer in execution traces + + fakeMsg := &types.Message{ + From: builtin.SystemActorAddr, + To: addr, + Value: big.Zero(), + Nonce: uint64(epoch), + } + fakeRct := &types.MessageReceipt{ + ExitCode: 0, + Return: nil, + GasUsed: 0, + } + + if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *fakeRct, + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: fakeRct, + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: subcalls, + }, + Duration: 0, + GasCosts: nil, + }); err != nil { + return xerrors.Errorf("recording transfers: %w", err) + } + } + return nil } @@ -770,7 +835,7 @@ func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, erro } // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting -func resetGenesisMsigs(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { +func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { gb, err := sm.cs.GetGenesis() if err != nil { return xerrors.Errorf("getting genesis block: %w", err) @@ -820,7 +885,7 @@ func resetGenesisMsigs(ctx context.Context, sm *StateManager, store adt0.Store, return nil } -func resetMultisigVesting(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error { +func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error { act, err := tree.GetActor(addr) if err != nil { return xerrors.Errorf("getting actor: %w", err) diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 0388af6ad..a2b7a179f 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -16,14 +16,15 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/specs-actors/actors/builtin" - init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/runtime" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/aerrors" - lotusinit "github.com/filecoin-project/lotus/chain/actors/builtin/init" + _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" . "github.com/filecoin-project/lotus/chain/stmgr" @@ -45,7 +46,7 @@ type testActor struct { } // must use existing actor that an account is allowed to exec. 
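A minimal sketch of the migration tracing pattern used in UpgradeFaucetBurnRecovery and splitGenesisMultisig0 above: doTransfer now reports each transfer through a callback, and the caller records the collected traces as subcalls of one synthetic message instead of invoking the chain-level callback per transfer. The types below are simplified stand-ins for types.ExecutionTrace and the real callback:

```go
package main

import "fmt"

// trace stands in for types.ExecutionTrace; only what is needed to show the
// aggregation pattern is kept.
type trace struct {
	Msg      string
	Subcalls []trace
}

// transfer is a doTransfer-style helper: it reports its own trace through cb
// so the caller decides how to record it.
func transfer(from, to string, amt int, cb func(trace)) {
	// ... move the funds on the state tree (omitted in this sketch) ...
	cb(trace{Msg: fmt.Sprintf("transfer %d %s->%s", amt, from, to)})
}

func main() {
	var subcalls []trace
	collect := func(t trace) { subcalls = append(subcalls, t) }

	transfer("f0100", "f090", 10, collect)
	transfer("f0101", "f090", 25, collect)

	// The migration then records a single synthetic message whose execution
	// trace carries every transfer as a subcall, as the patch does with a
	// fake SystemActor message.
	root := trace{Msg: "migration", Subcalls: subcalls}
	fmt.Printf("recorded %d transfers under one trace\n", len(root.Subcalls))
}
```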
-func (testActor) Code() cid.Cid { return builtin.PaymentChannelActorCodeID } +func (testActor) Code() cid.Cid { return builtin0.PaymentChannelActorCodeID } func (testActor) State() cbor.Er { return new(testActorState) } type testActorState struct { @@ -75,7 +76,7 @@ func (ta testActor) Exports() []interface{} { } } -func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue { +func (ta *testActor) Constructor(rt rt2.Runtime, params *abi.EmptyValue) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() rt.StateCreate(&testActorState{11}) //fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Receiver()) @@ -83,7 +84,7 @@ func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *ab return abi.Empty } -func (ta *testActor) TestMethod(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue { +func (ta *testActor) TestMethod(rt rt2.Runtime, params *abi.EmptyValue) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() var st testActorState rt.StateReadonly(&st) @@ -175,15 +176,15 @@ func TestForkHeightTriggers(t *testing.T) { var msgs []*types.SignedMessage - enc, err := actors.SerializeParams(&init0.ExecParams{CodeCID: (testActor{}).Code()}) + enc, err := actors.SerializeParams(&init2.ExecParams{CodeCID: (testActor{}).Code()}) if err != nil { t.Fatal(err) } m := &types.Message{ From: cg.Banker(), - To: lotusinit.Address, - Method: builtin.MethodsInit.Exec, + To: _init.Address, + Method: _init.Methods.Exec, Params: enc, GasLimit: types.TestGasLimit, } @@ -273,15 +274,15 @@ func TestForkRefuseCall(t *testing.T) { cg.SetStateManager(sm) - enc, err := actors.SerializeParams(&init0.ExecParams{CodeCID: (testActor{}).Code()}) + enc, err := actors.SerializeParams(&init2.ExecParams{CodeCID: (testActor{}).Code()}) if err != nil { t.Fatal(err) } m := &types.Message{ From: cg.Banker(), - To: lotusinit.Address, - Method: builtin.MethodsInit.Exec, + To: _init.Address, + Method: _init.Methods.Exec, Params: enc, GasLimit: types.TestGasLimit, Value: types.NewInt(0), diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index d6b6f4360..7e5809a84 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -6,11 +6,6 @@ import ( "fmt" "sync" - "github.com/filecoin-project/lotus/chain/actors/builtin" - - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" @@ -23,15 +18,23 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" + // Used for genesis. 
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/cron" + _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -43,6 +46,8 @@ const LookbackNoLimit = abi.ChainEpoch(-1) var log = logging.Logger("statemgr") type StateManagerAPI interface { + Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) + GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) @@ -195,6 +200,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { ir := &api.InvocResult{ + MsgCid: mcid, Msg: msg, MsgRct: &ret.MessageReceipt, ExecutionTrace: ret.ExecutionTrace, @@ -203,6 +209,9 @@ func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, if ret.ActorErr != nil { ir.Error = ret.ActorErr.Error() } + if ret.GasCosts != nil { + ir.GasCost = MakeMsgGasCost(msg, ret) + } *trace = append(*trace, ir) return nil } @@ -229,9 +238,10 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp Rand: r, Bstore: sm.cs.Blockstore(), Syscalls: sm.cs.VMSys(), - CircSupplyCalc: sm.GetCirculatingSupply, + CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, BaseFee: baseFee, + LookbackState: LookbackStateGetterForTipset(sm, ts), } return sm.newVM(ctx, vmopt) @@ -242,22 +252,17 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) } - runCron := func() error { - // TODO: this nonce-getting is a tiny bit ugly - ca, err := vmi.StateTree().GetActor(builtin0.SystemActorAddr) - if err != nil { - return err - } + runCron := func(epoch abi.ChainEpoch) error { cronMsg := &types.Message{ - To: builtin0.CronActorAddr, - From: builtin0.SystemActorAddr, - Nonce: ca.Nonce, + To: cron.Address, + From: builtin.SystemActorAddr, + Nonce: uint64(epoch), Value: types.NewInt(0), GasFeeCap: types.NewInt(0), GasPremium: types.NewInt(0), GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little - Method: builtin0.MethodsCron.EpochTick, + Method: 
cron.Methods.EpochTick, Params: nil, } ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg) @@ -279,7 +284,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp for i := parentEpoch; i < epoch; i++ { if i > parentEpoch { // run cron for null rounds if any - if err := runCron(); err != nil { + if err := runCron(i); err != nil { return cid.Undef, cid.Undef, err } @@ -308,7 +313,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp } var receipts []cbg.CBORMarshaler - processedMsgs := map[cid.Cid]bool{} + processedMsgs := make(map[cid.Cid]struct{}) for _, b := range bms { penalty := types.NewInt(0) gasReward := big.Zero() @@ -332,7 +337,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Undef, cid.Undef, err } } - processedMsgs[m.Cid()] = true + processedMsgs[m.Cid()] = struct{}{} } params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{ @@ -345,20 +350,15 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err) } - sysAct, actErr := vmi.StateTree().GetActor(builtin0.SystemActorAddr) - if actErr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get system actor: %w", actErr) - } - rwMsg := &types.Message{ - From: builtin0.SystemActorAddr, + From: builtin.SystemActorAddr, To: reward.Address, - Nonce: sysAct.Nonce, + Nonce: uint64(epoch), Value: types.NewInt(0), GasFeeCap: types.NewInt(0), GasPremium: types.NewInt(0), GasLimit: 1 << 30, - Method: builtin0.MethodsReward.AwardBlockReward, + Method: reward.Methods.AwardBlockReward, Params: params, } ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) @@ -376,7 +376,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp } } - if err := runCron(); err != nil { + if err := runCron(epoch); err != nil { return cid.Cid{}, cid.Cid{}, err } @@ -430,12 +430,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet parentEpoch = parent.Height } - cids := make([]cid.Cid, len(blks)) - for i, v := range blks { - cids[i] = v.Cid() - } - - r := store.NewChainRand(sm.cs, cids) + r := store.NewChainRand(sm.cs, ts.Cids()) blkmsgs, err := sm.cs.BlockMsgsForTipset(ts) if err != nil { @@ -733,7 +728,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet } if r != nil { - return pts, r, foundMsg, nil + return cur, r, foundMsg, nil } } @@ -980,7 +975,7 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error { } else if builtin.IsAccountActor(act.Code) { // should exclude burnt funds actor and "remainder account actor" // should only ever be "faucet" accounts in testnets - if kaddr == builtin0.BurntFundsActorAddr { + if kaddr == builtin.BurntFundsActorAddr { return nil } @@ -1058,24 +1053,24 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) // 6 months - sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay) + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) totalsByEpoch[sixMonths] = big.NewInt(49_929_341) totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) // 1 year - oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay) + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) totalsByEpoch[oneYear] = big.NewInt(22_421_712) // 2 years - twoYears := abi.ChainEpoch(2 * 365 * 
builtin0.EpochsInDay) + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) totalsByEpoch[twoYears] = big.NewInt(7_223_364) // 3 years - threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay) + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) totalsByEpoch[threeYears] = big.NewInt(87_637_883) // 6 years - sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay) + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) @@ -1135,24 +1130,24 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) // 6 months - sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay) + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) totalsByEpoch[sixMonths] = big.NewInt(49_929_341) totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) // 1 year - oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay) + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) totalsByEpoch[oneYear] = big.NewInt(22_421_712) // 2 years - twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay) + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) totalsByEpoch[twoYears] = big.NewInt(7_223_364) // 3 years - threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay) + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) totalsByEpoch[threeYears] = big.NewInt(87_637_883) // 6 years - sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay) + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) @@ -1286,7 +1281,7 @@ func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) ( } func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { - burnt, err := st.GetActor(builtin0.BurntFundsActorAddr) + burnt, err := st.GetActor(builtin.BurntFundsActorAddr) if err != nil { return big.Zero(), xerrors.Errorf("failed to load burnt actor: %w", err) } @@ -1294,7 +1289,16 @@ func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, err return burnt.Balance, nil } -func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { +func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { + cs, err := sm.GetVMCirculatingSupplyDetailed(ctx, height, st) + if err != nil { + return types.EmptyInt, err + } + + return cs.FilCirculating, err +} + +func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { sm.genesisMsigLk.Lock() defer sm.genesisMsigLk.Unlock() if sm.preIgnitionGenInfos == nil { @@ -1357,12 +1361,91 @@ func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height } func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { - csi, err := sm.GetCirculatingSupplyDetailed(ctx, height, st) + circ := big.Zero() + unCirc := big.Zero() + err := st.ForEach(func(a address.Address, actor *types.Actor) error { + switch { + case actor.Balance.IsZero(): + // Do nothing for zero-balance actors + break + case a == 
_init.Address || + a == reward.Address || + a == verifreg.Address || + // The power actor itself should never receive funds + a == power.Address || + a == builtin.SystemActorAddr || + a == builtin.CronActorAddr || + a == builtin.BurntFundsActorAddr || + a == builtin.SaftAddress || + a == builtin.ReserveAddress: + + unCirc = big.Add(unCirc, actor.Balance) + + case a == market.Address: + mst, err := market.Load(sm.cs.Store(ctx), actor) + if err != nil { + return err + } + + lb, err := mst.TotalLocked() + if err != nil { + return err + } + + circ = big.Add(circ, big.Sub(actor.Balance, lb)) + unCirc = big.Add(unCirc, lb) + + case builtin.IsAccountActor(actor.Code) || builtin.IsPaymentChannelActor(actor.Code): + circ = big.Add(circ, actor.Balance) + + case builtin.IsStorageMinerActor(actor.Code): + mst, err := miner.Load(sm.cs.Store(ctx), actor) + if err != nil { + return err + } + + ab, err := mst.AvailableBalance(actor.Balance) + + if err == nil { + circ = big.Add(circ, ab) + unCirc = big.Add(unCirc, big.Sub(actor.Balance, ab)) + } else { + // Assume any error is because the miner state is "broken" (lower actor balance than locked funds) + // In this case, the actor's entire balance is considered "uncirculating" + unCirc = big.Add(unCirc, actor.Balance) + } + + case builtin.IsMultisigActor(actor.Code): + mst, err := multisig.Load(sm.cs.Store(ctx), actor) + if err != nil { + return err + } + + lb, err := mst.LockedBalance(height) + if err != nil { + return err + } + + ab := big.Sub(actor.Balance, lb) + circ = big.Add(circ, big.Max(ab, big.Zero())) + unCirc = big.Add(unCirc, big.Min(actor.Balance, lb)) + default: + return xerrors.Errorf("unexpected actor: %s", a) + } + + return nil + }) + if err != nil { - return big.Zero(), err + return types.EmptyInt, err } - return csi.FilCirculating, nil + total := big.Add(circ, unCirc) + if !total.Equals(types.TotalFilecoinInt) { + return types.EmptyInt, xerrors.Errorf("total filecoin didn't add to expected amount: %s != %s", total, types.TotalFilecoinInt) + } + + return circ, nil } func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index c0f0c4d2f..78121cc4c 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -12,7 +12,6 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors/policy" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -24,16 +23,16 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/rt" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -159,7 
+158,7 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, return mas.LoadSectors(snos) } -func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]proof0.SectorInfo, error) { +func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { act, err := sm.LoadActorRaw(ctx, maddr, st) if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) @@ -244,9 +243,9 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S return nil, xerrors.Errorf("loading proving sectors: %w", err) } - out := make([]proof0.SectorInfo, len(sectors)) + out := make([]builtin.SectorInfo, len(sectors)) for i, sinfo := range sectors { - out[i] = proof0.SectorInfo{ + out[i] = builtin.SectorInfo{ SealProof: spt, SectorNumber: sinfo.SectorNumber, SealedCID: sinfo.SealedCID, @@ -256,25 +255,6 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S return out, nil } -func StateMinerInfo(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.MinerInfo, error) { - act, err := sm.LoadActor(ctx, maddr, ts) - if err != nil { - return nil, xerrors.Errorf("failed to load miner actor: %w", err) - } - - mas, err := miner.Load(sm.cs.Store(ctx), act) - if err != nil { - return nil, xerrors.Errorf("failed to load miner actor state: %w", err) - } - - mi, err := mas.Info() - if err != nil { - return nil, err - } - - return &mi, err -} - func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) { act, err := sm.LoadActor(ctx, power.Address, ts) if err != nil { @@ -383,9 +363,10 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, Rand: r, Bstore: sm.cs.Blockstore(), Syscalls: sm.cs.VMSys(), - CircSupplyCalc: sm.GetCirculatingSupply, + CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, BaseFee: ts.Blocks()[0].ParentBaseFee, + LookbackState: LookbackStateGetterForTipset(sm, ts), } vmi, err := sm.newVM(ctx, vmopt) if err != nil { @@ -411,7 +392,17 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, return root, trace, nil } -func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) { +func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.LookbackStateGetter { + return func(ctx context.Context, round abi.ChainEpoch) (*state.StateTree, error) { + _, st, err := GetLookbackTipSetForRound(ctx, sm, ts, round) + if err != nil { + return nil, err + } + return sm.StateTree(st) + } +} + +func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) { var lbr abi.ChainEpoch lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round)) if round > lb { @@ -419,16 +410,33 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types. } // more null blocks than our lookback - if lbr > ts.Height() { - return ts, nil + if lbr >= ts.Height() { + // This should never happen at this point, but may happen before + // network version 3 (where the lookback was only 10 blocks). 
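`GetLookbackTipSetForRound` now returns the lookback state root alongside the tipset, which lets the state manager hand the VM a lazy `vm.LookbackStateGetter` instead of recomputing tipset state at every call site. A minimal consumer sketch; the helper name is hypothetical, and `ApplyBlocks`/`ComputeState` wire the same getter into `VMOpts.LookbackState`:

```go
package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

// lookbackStateAt maps a round to the state tree that lookback-sensitive
// operations (such as resolving a miner's worker key) should consult, using
// the same getter the VM now receives.
func lookbackStateAt(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, round abi.ChainEpoch) (*state.StateTree, error) {
	getter := stmgr.LookbackStateGetterForTipset(sm, ts)
	st, err := getter(ctx, round)
	if err != nil {
		return nil, xerrors.Errorf("resolving lookback state at epoch %d: %w", round, err)
	}
	return st, nil
}
```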
+ st, _, err := sm.TipSetState(ctx, ts) + if err != nil { + return nil, cid.Undef, err + } + return ts, st, nil } - lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr, ts, true) + // Get the tipset after the lookback tipset, or the next non-null one. + nextTs, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr+1, ts, false) if err != nil { - return nil, xerrors.Errorf("failed to get lookback tipset: %w", err) + return nil, cid.Undef, xerrors.Errorf("failed to get lookback tipset+1: %w", err) } - return lbts, nil + if lbr > nextTs.Height() { + return nil, cid.Undef, xerrors.Errorf("failed to find non-null tipset %s (%d) which is known to exist, found %s (%d)", ts.Key(), ts.Height(), nextTs.Key(), nextTs.Height()) + + } + + lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents()) + if err != nil { + return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err) + } + + return lbts, nextTs.ParentState(), nil } func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { @@ -456,17 +464,20 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule rbase = entries[len(entries)-1] } - lbts, err := GetLookbackTipSetForRound(ctx, sm, ts, round) + lbts, lbst, err := GetLookbackTipSetForRound(ctx, sm, ts, round) if err != nil { return nil, xerrors.Errorf("getting lookback miner actor state: %w", err) } - lbst, _, err := sm.TipSetState(ctx, lbts) - if err != nil { - return nil, err - } - act, err := sm.LoadActorRaw(ctx, maddr, lbst) + if xerrors.Is(err, types.ErrActorNotFound) { + _, err := sm.LoadActor(ctx, maddr, ts) + if err != nil { + return nil, xerrors.Errorf("loading miner in current state: %w", err) + } + + return nil, nil + } if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) } @@ -548,8 +559,7 @@ func init() { methods := make(map[abi.MethodNum]MethodMeta, len(exports)) // Explicitly add send, it's special. - // Note that builtin2.MethodSend = builtin0.MethodSend = 0. - methods[builtin0.MethodSend] = MethodMeta{ + methods[builtin.MethodSend] = MethodMeta{ Name: "Send", Params: reflect.TypeOf(new(abi.EmptyValue)), Ret: reflect.TypeOf(new(abi.EmptyValue)), @@ -573,11 +583,9 @@ func init() { fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm") switch abi.MethodNum(number) { - case builtin0.MethodSend: - // Note that builtin2.MethodSend = builtin0.MethodSend = 0. + case builtin.MethodSend: panic("method 0 is reserved for Send") - case builtin0.MethodConstructor: - // Note that builtin2.MethodConstructor = builtin0.MethodConstructor = 1. 
+ case builtin.MethodConstructor: if fnName != "Constructor" { panic("method 1 is reserved for Constructor") } @@ -606,6 +614,14 @@ func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, me return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil } +func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) { + m, found := MethodsMap[actCode][method] + if !found { + return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode) + } + return reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler), nil +} + func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) { pact, err := sm.LoadActor(ctx, power.Address, ts) if err != nil { @@ -701,3 +717,16 @@ func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi return sum, nil } + +func MakeMsgGasCost(msg *types.Message, ret *vm.ApplyRet) api.MsgGasCost { + return api.MsgGasCost{ + Message: msg.Cid(), + GasUsed: big.NewInt(ret.GasUsed), + BaseFeeBurn: ret.GasCosts.BaseFeeBurn, + OverEstimationBurn: ret.GasCosts.OverEstimationBurn, + MinerPenalty: ret.GasCosts.MinerPenalty, + MinerTip: ret.GasCosts.MinerTip, + Refund: ret.GasCosts.Refund, + TotalCost: big.Sub(msg.RequiredFunds(), ret.GasCosts.Refund), + } +} diff --git a/chain/store/store.go b/chain/store/store.go index c6fc0cbef..00a78500e 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -19,12 +19,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/journal" bstore "github.com/filecoin-project/lotus/lib/blockstore" @@ -815,7 +815,7 @@ func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { ctx := context.TODO() // block headers use adt0, for now. - a, err := adt0.AsArray(cs.Store(ctx), root) + a, err := blockadt.AsArray(cs.Store(ctx), root) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } @@ -1009,7 +1009,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { ctx := context.TODO() // block headers use adt0, for now. 
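`GetParamType` complements the existing `GetReturnType`: for a given actor code CID and method number it returns a fresh, typed params value that knows how to unmarshal itself from CBOR. A small usage sketch; obtaining the recipient actor's code CID and the raw params is assumed to happen elsewhere:

```go
package example

import (
	"bytes"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// decodeParams turns raw message params into the concrete params struct
// registered in stmgr.MethodsMap for the target actor and method.
func decodeParams(actCode cid.Cid, method abi.MethodNum, raw []byte) (interface{}, error) {
	p, err := stmgr.GetParamType(actCode, method)
	if err != nil {
		return nil, err
	}
	if err := p.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
		return nil, err
	}
	return p, nil
}
```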
- a, err := adt0.AsArray(cs.Store(ctx), b.ParentMessageReceipts) + a, err := blockadt.AsArray(cs.Store(ctx), b.ParentMessageReceipts) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } @@ -1312,11 +1312,13 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { - mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) - if err != nil { - return xerrors.Errorf("recursing messages failed: %w", err) + if walked.Visit(b.Messages) { + mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) + if err != nil { + return xerrors.Errorf("recursing messages failed: %w", err) + } + cids = mcids } - cids = mcids } if b.Height > 0 { @@ -1331,12 +1333,14 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe out := cids if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { - cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) - if err != nil { - return xerrors.Errorf("recursing genesis state failed: %w", err) - } + if walked.Visit(b.ParentStateRoot) { + cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + if err != nil { + return xerrors.Errorf("recursing genesis state failed: %w", err) + } - out = append(out, cids...) + out = append(out, cids...) + } } for _, c := range out { diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 326899f90..160527104 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/blockstore" @@ -107,3 +108,60 @@ func TestChainExportImport(t *testing.T) { t.Fatal("imported chain differed from exported chain") } } + +func TestChainExportImportFull(t *testing.T) { + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + var last *types.TipSet + for i := 0; i < 100; i++ { + ts, err := cg.NextTipSet() + if err != nil { + t.Fatal(err) + } + + last = ts.TipSet.TipSet() + } + + buf := new(bytes.Buffer) + if err := cg.ChainStore().Export(context.TODO(), last, last.Height(), false, buf); err != nil { + t.Fatal(err) + } + + nbs := blockstore.NewTemporary() + cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil) + root, err := cs.Import(buf) + if err != nil { + t.Fatal(err) + } + + err = cs.SetHead(last) + if err != nil { + t.Fatal(err) + } + + if !root.Equals(last) { + t.Fatal("imported chain differed from exported chain") + } + + sm := stmgr.NewStateManager(cs) + for i := 0; i < 100; i++ { + ts, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(i), nil, false) + if err != nil { + t.Fatal(err) + } + + st, err := sm.ParentState(ts) + if err != nil { + t.Fatal(err) + } + + // touches a bunch of actors + _, err = sm.GetCirculatingSupply(context.TODO(), abi.ChainEpoch(i), st) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 68ee5e20c..1701866eb 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "golang.org/x/xerrors" @@ -23,20 +22,18 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - adt0 
"github.com/filecoin-project/specs-actors/actors/util/adt" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/impl/client" ) var log = logging.Logger("sub") @@ -44,6 +41,13 @@ var log = logging.Logger("sub") var ErrSoftFailure = errors.New("soft validation failure") var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power") +var msgCidPrefix = cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: client.DefaultHashFunction, + MhLength: 32, +} + func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bs bserv.BlockService, cmgr connmgr.ConnManager) { // Timeout after (block time + propagation delay). This is useless at // this point. @@ -168,6 +172,9 @@ func fetchCids( cidIndex := make(map[cid.Cid]int) for i, c := range cids { + if c.Prefix() != msgCidPrefix { + return fmt.Errorf("invalid msg CID: %s", c) + } cidIndex[c] = i } if len(cids) != len(cidIndex) { @@ -216,9 +223,6 @@ type BlockValidator struct { // necessary for block validation chain *store.ChainStore stmgr *stmgr.StateManager - - mx sync.Mutex - keycache map[string]address.Address } func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator { @@ -231,7 +235,6 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.State recvBlocks: newBlockReceiptCache(), chain: chain, stmgr: stmgr, - keycache: make(map[string]address.Address), } } @@ -380,9 +383,9 @@ func (bv *BlockValidator) isChainNearSynced() bool { func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { // TODO there has to be a simpler way to do this without the blockstore dance // block headers use adt0 - store := adt0.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary())) - bmArr := adt0.MakeEmptyArray(store) - smArr := adt0.MakeEmptyArray(store) + store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary())) + bmArr := blockadt.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(store) for i, m := range msg.BlsMessages { c := cbg.CborCid(m) @@ -425,60 +428,25 @@ func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockM } func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) { - addr := bh.Miner - - bv.mx.Lock() - key, ok := bv.keycache[addr.String()] - bv.mx.Unlock() - if !ok { - // TODO I have a feeling all this can be simplified by cleverer DI to use the API - ts := bv.chain.GetHeaviestTipSet() - st, _, err := bv.stmgr.TipSetState(ctx, ts) - if err != nil { - return address.Undef, err - } - - buf := bufbstore.NewBufferedBstore(bv.chain.Blockstore()) - cst := cbor.NewCborStore(buf) - state, err := state.LoadStateTree(cst, st) - if err != nil { - return address.Undef, err - } - act, err := state.GetActor(addr) - if err 
!= nil { - return address.Undef, err - } - - mst, err := miner.Load(bv.chain.Store(ctx), act) - if err != nil { - return address.Undef, err - } - - info, err := mst.Info() - if err != nil { - return address.Undef, err - } - - worker := info.Worker - key, err = bv.stmgr.ResolveToKeyAddress(ctx, worker, ts) - if err != nil { - return address.Undef, err - } - - bv.mx.Lock() - bv.keycache[addr.String()] = key - bv.mx.Unlock() - } - // we check that the miner met the minimum power at the lookback tipset baseTs := bv.chain.GetHeaviestTipSet() - lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height) + lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height) if err != nil { log.Warnf("failed to load lookback tipset for incoming block: %s", err) return address.Undef, ErrSoftFailure } + key, err := stmgr.GetMinerWorkerRaw(ctx, bv.stmgr, lbst, bh.Miner) + if err != nil { + log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err) + return address.Undef, ErrSoftFailure + } + + // NOTE: we check to see if the miner was eligible in the lookback + // tipset - 1 for historical reasons. DO NOT use the lookback state + // returned by GetLookbackTipSetForRound. + eligible, err := stmgr.MinerEligibleToMine(ctx, bv.stmgr, bh.Miner, baseTs, lbts) if err != nil { log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err) diff --git a/chain/sync.go b/chain/sync.go index 9040e3f05..1410dd2a7 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -15,8 +15,6 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/Gurpartap/async" "github.com/hashicorp/go-multierror" blocks "github.com/ipfs/go-block-format" @@ -37,7 +35,11 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" blst "github.com/supranational/blst/bindings/go" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + // named msgarray here to make it clear that these are the types used by + // messages, regardless of specs-actors version. + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -268,14 +270,15 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { syncer.Exchange.AddPeer(from) - bestPweight := syncer.store.GetHeaviestTipSet().ParentWeight() + hts := syncer.store.GetHeaviestTipSet() + bestPweight := hts.ParentWeight() targetWeight := fts.TipSet().ParentWeight() if targetWeight.LessThan(bestPweight) { var miners []string for _, blk := range fts.TipSet().Blocks() { miners = append(miners, blk.Miner.String()) } - log.Infof("incoming tipset from %s does not appear to be better than our best chain, ignoring for now", miners) + log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) return false } @@ -323,25 +326,35 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) } - // Collect the CIDs of both types of messages separately: BLS and Secpk. 
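The `msgCidPrefix` guard added to `fetchCids` rejects advertised message CIDs before any network fetch: a CID that is not CIDv1 dag-cbor with a 32-byte digest of Lotus's default hash function cannot name a valid message, so there is no point requesting it from the block service. A stand-alone sketch of the same check, reusing the constants from the change:

```go
package example

import (
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/node/impl/client"
)

// msgCidPrefix is the only CID shape a Lotus message can have.
var msgCidPrefix = cid.Prefix{
	Version:  1,
	Codec:    cid.DagCBOR,
	MhType:   client.DefaultHashFunction,
	MhLength: 32,
}

// checkMsgCids fails fast on any CID that could not possibly be a message,
// before the CIDs are handed off for fetching.
func checkMsgCids(cids []cid.Cid) error {
	for _, c := range cids {
		if c.Prefix() != msgCidPrefix {
			return fmt.Errorf("invalid msg CID: %s", c)
		}
	}
	return nil
}
```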
- var bcids, scids []cid.Cid - for _, m := range fblk.BlsMessages { - bcids = append(bcids, m.Cid()) - } - - for _, m := range fblk.SecpkMessages { - scids = append(scids, m.Cid()) - } - // TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta // computation need to go into the 'temporary' side of the blockstore when // we implement that - blockstore := syncer.store.Blockstore() - bs := cbor.NewCborStore(blockstore) + // We use a temporary bstore here to avoid writing intermediate pieces + // into the blockstore. + blockstore := bstore.NewTemporary() + cst := cbor.NewCborStore(blockstore) + + var bcids, scids []cid.Cid + + for _, m := range fblk.BlsMessages { + c, err := store.PutMessage(blockstore, m) + if err != nil { + return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) + } + bcids = append(bcids, c) + } + + for _, m := range fblk.SecpkMessages { + c, err := store.PutMessage(blockstore, m) + if err != nil { + return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) + } + scids = append(scids, c) + } // Compute the root CID of the combined message trie. - smroot, err := computeMsgMeta(bs, bcids, scids) + smroot, err := computeMsgMeta(cst, bcids, scids) if err != nil { return xerrors.Errorf("validating msgmeta, compute failed: %w", err) } @@ -351,21 +364,8 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) } - for _, m := range fblk.BlsMessages { - _, err := store.PutMessage(blockstore, m) - if err != nil { - return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) - } - } - - for _, m := range fblk.SecpkMessages { - _, err := store.PutMessage(blockstore, m) - if err != nil { - return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) - } - } - - return nil + // Finally, flush. + return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot) } func (syncer *Syncer) LocalPeer() peer.ID { @@ -465,9 +465,9 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types // of both types (BLS and Secpk). 
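`ValidateMsgMeta` above and `checkBlockMessages` further down now share the same shape: stage the candidate message objects in a throw-away blockstore, compute the msgmeta root, compare it against the header, and only then copy the verified DAG into the chain blockstore with `vm.Copy`. A condensed sketch of that flow; the method name is illustrative, and the real logic is inlined at the two call sites:

```go
package chain

import (
	"context"

	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
	bstore "github.com/filecoin-project/lotus/lib/blockstore"
)

// stageAndFlushMessages verifies a block's messages against the msgmeta root
// in its header before anything touches the permanent store.
func (syncer *Syncer) stageAndFlushMessages(ctx context.Context, fblk *types.FullBlock) error {
	tmp := bstore.NewTemporary() // scratch space, discarded on failure
	cst := cbor.NewCborStore(tmp)

	var bcids, scids []cid.Cid
	for _, m := range fblk.BlsMessages {
		c, err := store.PutMessage(tmp, m)
		if err != nil {
			return err
		}
		bcids = append(bcids, c)
	}
	for _, m := range fblk.SecpkMessages {
		c, err := store.PutMessage(tmp, m)
		if err != nil {
			return err
		}
		scids = append(scids, c)
	}

	smroot, err := computeMsgMeta(cst, bcids, scids)
	if err != nil {
		return err
	}
	if fblk.Header.Messages != smroot {
		return xerrors.Errorf("msgmeta mismatch: %s != %s", fblk.Header.Messages, smroot)
	}

	// Only verified data reaches the chain blockstore.
	return vm.Copy(ctx, tmp, syncer.store.Blockstore(), smroot)
}
```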
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) { // block headers use adt0 - store := adt0.WrapStore(context.TODO(), bs) - bmArr := adt0.MakeEmptyArray(store) - smArr := adt0.MakeEmptyArray(store) + store := blockadt.WrapStore(context.TODO(), bs) + bmArr := blockadt.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(store) for i, m := range bmsgCids { c := cbg.CborCid(m) @@ -730,16 +730,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } - lbts, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) + lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } - lbst, _, err := syncer.sm.TipSetState(ctx, lbts) - if err != nil { - return xerrors.Errorf("failed to compute lookback tipset state (epoch %d): %w", lbts.Height(), err) - } - prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) @@ -1017,7 +1012,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block return xerrors.Errorf("getting winning post sector set: %w", err) } - ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{ + ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{ Randomness: rand, Proofs: h.WinPoStProof, ChallengedSectors: sectors, @@ -1064,8 +1059,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return err } - cst := cbor.NewCborStore(syncer.store.Blockstore()) - st, err := state.LoadStateTree(cst, stateroot) + st, err := state.LoadStateTree(syncer.store.Store(ctx), stateroot) if err != nil { return xerrors.Errorf("failed to load base state tree: %w", err) } @@ -1111,21 +1105,28 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return nil } - store := adt0.WrapStore(ctx, cst) + // Validate message arrays in a temporary blockstore. 
+ tmpbs := bstore.NewTemporary() + tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) - bmArr := adt0.MakeEmptyArray(store) + bmArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.BlsMessages { if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } - c := cbg.CborCid(m.Cid()) - if err := bmArr.Set(uint64(i), &c); err != nil { + c, err := store.PutMessage(tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + + k := cbg.CborCid(c) + if err := bmArr.Set(uint64(i), &k); err != nil { return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) } } - smArr := adt0.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.SecpkMessages { if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) @@ -1142,8 +1143,12 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } - c := cbg.CborCid(m.Cid()) - if err := smArr.Set(uint64(i), &c); err != nil { + c, err := store.PutMessage(tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + k := cbg.CborCid(c) + if err := smArr.Set(uint64(i), &k); err != nil { return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) } } @@ -1158,7 +1163,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return err } - mrcid, err := cst.Put(ctx, &types.MsgMeta{ + mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, }) @@ -1170,7 +1175,8 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return fmt.Errorf("messages didnt match message root in header") } - return nil + // Finally, flush. 
+ return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid) } func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error { @@ -1385,6 +1391,11 @@ loop: } base := blockSet[len(blockSet)-1] + if base.Equals(known) { + blockSet = blockSet[:len(blockSet)-1] + base = blockSet[len(blockSet)-1] + } + if base.IsChildOf(known) { // common case: receiving blocks that are building on top of our best tipset return blockSet, nil @@ -1478,7 +1489,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error { ss := extractSyncState(ctx) - ss.SetHeight(0) + ss.SetHeight(headers[len(headers)-1].Height()) return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error { log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids())) @@ -1720,9 +1731,6 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error } func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { - if build.InsecurePoStValidation { - return nil - } return gen.VerifyVRF(ctx, worker, rand, evrf) } diff --git a/chain/sync_manager.go b/chain/sync_manager.go index c7fdea726..c25068f60 100644 --- a/chain/sync_manager.go +++ b/chain/sync_manager.go @@ -2,7 +2,9 @@ package chain import ( "context" + "os" "sort" + "strings" "sync" "github.com/filecoin-project/lotus/chain/types" @@ -11,6 +13,14 @@ import ( const BootstrapPeerThreshold = 2 +var coalesceForksParents = false + +func init() { + if os.Getenv("LOTUS_SYNC_REL_PARENT") == "yes" { + coalesceForksParents = true + } +} + const ( BSStateInit = 0 BSStateSelected = 1 @@ -152,6 +162,19 @@ func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket { return &stb } +func (sbs *syncBucketSet) String() string { + var bStrings []string + for _, b := range sbs.buckets { + var tsStrings []string + for _, t := range b.tips { + tsStrings = append(tsStrings, t.String()) + } + bStrings = append(bStrings, "["+strings.Join(tsStrings, ",")+"]") + } + + return "{" + strings.Join(bStrings, ";") + "}" +} + func (sbs *syncBucketSet) RelatedToAny(ts *types.TipSet) bool { for _, b := range sbs.buckets { if b.sameChainAs(ts) { @@ -198,13 +221,17 @@ func (sbs *syncBucketSet) removeBucket(toremove *syncTargetBucket) { } func (sbs *syncBucketSet) PopRelated(ts *types.TipSet) *syncTargetBucket { + var bOut *syncTargetBucket for _, b := range sbs.buckets { if b.sameChainAs(ts) { sbs.removeBucket(b) - return b + if bOut == nil { + bOut = &syncTargetBucket{} + } + bOut.tips = append(bOut.tips, b.tips...) 
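In the sync manager, `PopRelated` now drains every bucket related to the finished sync target instead of returning only the first match, and `sameChainAs` can optionally treat sibling forks as related when `LOTUS_SYNC_REL_PARENT=yes` is set (the `coalesceForksParents` package variable). The relation itself reduces to a small predicate; a sketch with the flag passed explicitly rather than read from the environment:

```go
package example

import "github.com/filecoin-project/lotus/chain/types"

// related reports whether two tipsets belong in the same sync bucket.
func related(a, b *types.TipSet, coalesceForkParents bool) bool {
	switch {
	case a.Equals(b):
		return true
	case a.Key() == b.Parents(), b.Key() == a.Parents():
		// one tipset directly extends the other
		return true
	case coalesceForkParents && a.Parents() == b.Parents():
		// competing forks built on the same parent tipset
		return true
	default:
		return false
	}
}
```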
} } - return nil + return bOut } func (sbs *syncBucketSet) Heaviest() *types.TipSet { @@ -224,8 +251,7 @@ func (sbs *syncBucketSet) Empty() bool { } type syncTargetBucket struct { - tips []*types.TipSet - count int + tips []*types.TipSet } func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { @@ -239,12 +265,14 @@ func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { if ts.Parents() == t.Key() { return true } + if coalesceForksParents && ts.Parents() == t.Parents() { + return true + } } return false } func (stb *syncTargetBucket) add(ts *types.TipSet) { - stb.count++ for _, t := range stb.tips { if t.Equals(ts) { @@ -294,7 +322,6 @@ func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) { } func (sm *syncManager) syncScheduler() { - for { select { case ts, ok := <-sm.incomingTipSets: @@ -326,7 +353,8 @@ func (sm *syncManager) scheduleIncoming(ts *types.TipSet) { var relatedToActiveSync bool for _, acts := range sm.activeSyncs { if ts.Equals(acts) { - break + // ignore, we are already syncing it + return } if ts.Parents() == acts.Key() { @@ -376,7 +404,9 @@ func (sm *syncManager) scheduleProcessResult(res *syncResult) { sm.nextSyncTarget = relbucket sm.workerChan = sm.syncTargets } else { - sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket) + for _, t := range relbucket.tips { + sm.syncQueue.Insert(t) + } } return } diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go index 269b3a62e..709e03a41 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -67,6 +67,69 @@ func assertGetSyncOp(t *testing.T, c chan *syncOp, ts *types.TipSet) { } } +func TestSyncManagerEdgeCase(t *testing.T) { + ctx := context.Background() + + a := mock.TipSet(mock.MkBlock(genTs, 1, 1)) + t.Logf("a: %s", a) + b1 := mock.TipSet(mock.MkBlock(a, 1, 2)) + t.Logf("b1: %s", b1) + b2 := mock.TipSet(mock.MkBlock(a, 2, 3)) + t.Logf("b2: %s", b2) + c1 := mock.TipSet(mock.MkBlock(b1, 2, 4)) + t.Logf("c1: %s", c1) + c2 := mock.TipSet(mock.MkBlock(b2, 1, 5)) + t.Logf("c2: %s", c2) + d1 := mock.TipSet(mock.MkBlock(c1, 1, 6)) + t.Logf("d1: %s", d1) + e1 := mock.TipSet(mock.MkBlock(d1, 1, 7)) + t.Logf("e1: %s", e1) + + runSyncMgrTest(t, "edgeCase", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { + sm.SetPeerHead(ctx, "peer1", a) + assertGetSyncOp(t, stc, a) + + sm.SetPeerHead(ctx, "peer1", b1) + sm.SetPeerHead(ctx, "peer1", b2) + // b1 and b2 are being processed + + b1op := <-stc + b2op := <-stc + if !b1op.ts.Equals(b1) { + b1op, b2op = b2op, b1op + } + + sm.SetPeerHead(ctx, "peer2", c2) // c2 is put into activeSyncTips at index 0 + sm.SetPeerHead(ctx, "peer2", c1) // c1 is put into activeSyncTips at index 1 + sm.SetPeerHead(ctx, "peer3", b2) // b2 is related to c2 and even though it is actively synced it is put into activeSyncTips index 0 + sm.SetPeerHead(ctx, "peer1", a) // a is related to b2 and is put into activeSyncTips index 0 + + b1op.done() // b1 completes first, is related to a, so it pops activeSyncTips index 0 + // even though correct one is index 1 + + b2op.done() + // b2 completes and is not related to c1, so it leaves activeSyncTips as it is + + waitUntilAllWorkersAreDone(stc) + + if len(sm.activeSyncTips.buckets) != 0 { + t.Errorf("activeSyncTips expected empty but got: %s", sm.activeSyncTips.String()) + } + }) +} + +func waitUntilAllWorkersAreDone(stc chan *syncOp) { + for i := 0; i < 10; { + select { + case so := <-stc: + so.done() + default: + i++ + time.Sleep(10 * time.Millisecond) + } + } +} + func TestSyncManager(t 
*testing.T) { ctx := context.Background() diff --git a/chain/sync_test.go b/chain/sync_test.go index 0a8174c41..559a73bf5 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -7,8 +7,6 @@ import ( "testing" "time" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -20,6 +18,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -469,8 +469,8 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64 return []uint64{1}, nil } -func (wpp badWpp) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) { - return []proof.PoStProof{ +func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { + return []proof2.PoStProof{ { PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte("evil"), diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go index 0ec33fe42..4db6788e1 100644 --- a/chain/types/blockheader.go +++ b/chain/types/blockheader.go @@ -4,7 +4,7 @@ import ( "bytes" "math/big" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/minio/blake2b-simd" @@ -55,7 +55,7 @@ type BlockHeader struct { BeaconEntries []BeaconEntry // 3 - WinPoStProof []proof.PoStProof // 4 + WinPoStProof []proof2.PoStProof // 4 Parents []cid.Cid // 5 diff --git a/chain/types/blockheader_test.go b/chain/types/blockheader_test.go index f5faac3b3..6674f1205 100644 --- a/chain/types/blockheader_test.go +++ b/chain/types/blockheader_test.go @@ -7,7 +7,7 @@ import ( "reflect" "testing" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" @@ -82,7 +82,7 @@ func TestInteropBH(t *testing.T) { t.Fatal(err) } - posts := []proof.PoStProof{ + posts := []proof2.PoStProof{ {PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte{0x07}}, } diff --git a/chain/types/execresult.go b/chain/types/execresult.go index 6fc93fac6..917b84a92 100644 --- a/chain/types/execresult.go +++ b/chain/types/execresult.go @@ -3,6 +3,7 @@ package types import ( "encoding/json" "fmt" + "regexp" "runtime" "strings" "time" @@ -68,11 +69,10 @@ func (l Loc) String() string { return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line) } +var importantRegex = regexp.MustCompile(`github.com/filecoin-project/specs-actors/(v\d+/)?actors/builtin`) + func (l Loc) Important() bool { - if strings.HasPrefix(l.Function, "github.com/filecoin-project/specs-actors/actors/builtin") { - return true - } - return false + return importantRegex.MatchString(l.Function) } func (gt *GasTrace) MarshalJSON() ([]byte, error) { diff --git a/chain/types/fil.go b/chain/types/fil.go index 7eac8ce93..0ea77660c 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -81,5 +81,14 @@ func ParseFIL(s string) (FIL, error) { return FIL{r.Num()}, nil } +func MustParseFIL(s string) FIL { + n, err := ParseFIL(s) + if err != nil { + panic(err) + } + + return n +} + var _ encoding.TextMarshaler = (*FIL)(nil) var 
_ encoding.TextUnmarshaler = (*FIL)(nil) diff --git a/chain/types/keystore.go b/chain/types/keystore.go index 76eb5f296..107c1fbe3 100644 --- a/chain/types/keystore.go +++ b/chain/types/keystore.go @@ -1,7 +1,10 @@ package types import ( + "encoding/json" "fmt" + + "github.com/filecoin-project/go-state-types/crypto" ) var ( @@ -9,9 +12,50 @@ var ( ErrKeyExists = fmt.Errorf("key already exists") ) +// KeyType defines a type of a key +type KeyType string + +func (kt *KeyType) UnmarshalJSON(bb []byte) error { + { + // first option, try unmarshaling as string + var s string + err := json.Unmarshal(bb, &s) + if err == nil { + *kt = KeyType(s) + return nil + } + } + + { + var b byte + err := json.Unmarshal(bb, &b) + if err != nil { + return fmt.Errorf("could not unmarshal KeyType either as string nor integer: %w", err) + } + bst := crypto.SigType(b) + + switch bst { + case crypto.SigTypeBLS: + *kt = KTBLS + case crypto.SigTypeSecp256k1: + *kt = KTSecp256k1 + default: + return fmt.Errorf("unknown sigtype: %d", bst) + } + log.Warnf("deprecation: integer style 'KeyType' is deprecated, switch to string style") + return nil + } +} + +const ( + KTBLS KeyType = "bls" + KTSecp256k1 KeyType = "secp256k1" + KTSecp256k1Ledger KeyType = "secp256k1-ledger" +) + // KeyInfo is used for storing keys in KeyStore type KeyInfo struct { - Type string + Type KeyType PrivateKey []byte } diff --git a/chain/types/message.go b/chain/types/message.go index 4fead44bc..c53ecc7c1 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "encoding/json" "fmt" "github.com/filecoin-project/go-state-types/abi" @@ -106,6 +107,20 @@ func (m *Message) Cid() cid.Cid { return b.Cid() } +type mCid struct { + *RawMessage + CID cid.Cid +} + +type RawMessage Message + +func (m *Message) MarshalJSON() ([]byte, error) { + return json.Marshal(&mCid{ + RawMessage: (*RawMessage)(m), + CID: m.Cid(), + }) +} + func (m *Message) RequiredFunds() BigInt { return BigMul(m.GasFeeCap, NewInt(uint64(m.GasLimit))) } @@ -180,7 +195,7 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { // since prices might vary with time, this is technically semantic validation if m.GasLimit < minGas { - return xerrors.New("'GasLimit' field cannot be less than the cost of storing a message on chain") + return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas) } return nil diff --git a/chain/types/message_test.go b/chain/types/message_test.go index f57385a09..a5a00f66b 100644 --- a/chain/types/message_test.go +++ b/chain/types/message_test.go @@ -1,18 +1,23 @@ package types import ( + "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-state-types/crypto" + + // we can't import the actors shims from this package due to cyclic imports. 
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) func TestEqualCall(t *testing.T) { m1 := &Message{ - To: builtin.StoragePowerActorAddr, - From: builtin.SystemActorAddr, + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, Nonce: 34, Value: big.Zero(), @@ -25,8 +30,8 @@ func TestEqualCall(t *testing.T) { } m2 := &Message{ - To: builtin.StoragePowerActorAddr, - From: builtin.SystemActorAddr, + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, Nonce: 34, Value: big.Zero(), @@ -39,8 +44,8 @@ func TestEqualCall(t *testing.T) { } m3 := &Message{ - To: builtin.StoragePowerActorAddr, - From: builtin.SystemActorAddr, + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, Nonce: 34, Value: big.Zero(), @@ -53,8 +58,8 @@ func TestEqualCall(t *testing.T) { } m4 := &Message{ - To: builtin.StoragePowerActorAddr, - From: builtin.SystemActorAddr, + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, Nonce: 34, Value: big.Zero(), @@ -70,3 +75,66 @@ func TestEqualCall(t *testing.T) { require.True(t, m1.EqualCall(m3)) require.False(t, m1.EqualCall(m4)) } + +func TestMessageJson(t *testing.T) { + m := &Message{ + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 123, + GasFeeCap: big.NewInt(234), + GasPremium: big.NewInt(234), + + Method: 6, + Params: []byte("hai"), + } + + b, err := json.Marshal(m) + require.NoError(t, err) + + exp := []byte("{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}}") + fmt.Println(string(b)) + + require.Equal(t, exp, b) + + var um Message + require.NoError(t, json.Unmarshal(b, &um)) + + require.EqualValues(t, *m, um) +} + +func TestSignedMessageJson(t *testing.T) { + m := Message{ + To: builtin2.StoragePowerActorAddr, + From: builtin2.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 123, + GasFeeCap: big.NewInt(234), + GasPremium: big.NewInt(234), + + Method: 6, + Params: []byte("hai"), + } + + sm := &SignedMessage{ + Message: m, + Signature: crypto.Signature{}, + } + + b, err := json.Marshal(sm) + require.NoError(t, err) + + exp := []byte("{\"Message\":{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}},\"Signature\":{\"Type\":0,\"Data\":null},\"CID\":{\"/\":\"bafy2bzacea5ainifngxj3rygaw2hppnyz2cw72x5pysqty2x6dxmjs5qg2uus\"}}") + fmt.Println(string(b)) + + require.Equal(t, exp, b) + + var um SignedMessage + require.NoError(t, json.Unmarshal(b, &um)) + + require.EqualValues(t, *sm, um) +} diff --git a/chain/types/signedmessage.go b/chain/types/signedmessage.go index 17d2f5d94..c539ac240 100644 --- a/chain/types/signedmessage.go +++ b/chain/types/signedmessage.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "encoding/json" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" @@ -62,8 +63,29 @@ func (sm *SignedMessage) Serialize() ([]byte, error) { return buf.Bytes(), nil } +type smCid struct { + *RawSignedMessage + CID cid.Cid +} + +type RawSignedMessage SignedMessage + +func (sm *SignedMessage) MarshalJSON() ([]byte, error) { + return 
json.Marshal(&smCid{ + RawSignedMessage: (*RawSignedMessage)(sm), + CID: sm.Cid(), + }) +} + func (sm *SignedMessage) ChainLength() int { - ser, err := sm.Serialize() + var ser []byte + var err error + if sm.Signature.Type == crypto.SigTypeBLS { + // BLS chain message length doesn't include signature + ser, err = sm.Message.Serialize() + } else { + ser, err = sm.Serialize() + } if err != nil { panic(err) } diff --git a/chain/vectors/gen/main.go b/chain/vectors/gen/main.go index 096548e04..757227d0d 100644 --- a/chain/vectors/gen/main.go +++ b/chain/vectors/gen/main.go @@ -11,7 +11,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" @@ -62,7 +61,7 @@ func MakeMessageSigningVectors() []vectors.MessageSigningVector { panic(err) } - blsk, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + blsk, err := w.WalletNew(context.Background(), types.KTBLS) if err != nil { panic(err) } @@ -86,7 +85,7 @@ func MakeMessageSigningVectors() []vectors.MessageSigningVector { Signature: &bmsg.Signature, } - secpk, err := w.WalletNew(context.Background(), crypto.SigTypeBLS) + secpk, err := w.WalletNew(context.Background(), types.KTBLS) if err != nil { panic(err) } diff --git a/chain/vm/gas.go b/chain/vm/gas.go index 6802013e5..95551f153 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,13 +3,13 @@ package vm import ( "fmt" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - vmr "github.com/filecoin-project/specs-actors/actors/runtime" "github.com/ipfs/go-cid" ) @@ -78,8 +78,8 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof.SealVerifyInfo) GasCharge - OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof2.SealVerifyInfo) GasCharge + OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } @@ -126,6 +126,7 @@ var prices = map[abi.ChainEpoch]Pricelist{ scale: 85639, }, }, + verifyPostDiscount: true, verifyConsensusFault: 495422, }, } @@ -150,7 +151,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr.Syscalls + under vmr2.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -184,7 +185,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -192,7 +193,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof.SealVerifyInfo) error { } // Verifies a proof of spacetime. 
-func (ps pricedSyscalls) VerifyPoSt(vi proof.WindowPoStVerifyInfo) error {
+func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error {
 	ps.chargeGas(ps.pl.OnVerifyPost(vi))
 	defer ps.chargeGas(gasOnActorExec)
 
@@ -209,14 +210,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof.WindowPoStVerifyInfo) error {
 // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
 // blocks in the parent of h2 (i.e. h2's grandparent).
 // Returns nil and an error if the headers don't prove a fault.
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) {
+func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) {
 	ps.chargeGas(ps.pl.OnVerifyConsensusFault())
 	defer ps.chargeGas(gasOnActorExec)
 
 	return ps.under.VerifyConsensusFault(h1, h2, extra)
 }
 
-func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) {
+func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) {
 	count := int64(0)
 	for _, svis := range inp {
 		count += int64(len(svis))
diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go
index bfb49c345..e4028039b 100644
--- a/chain/vm/gas_v0.go
+++ b/chain/vm/gas_v0.go
@@ -3,12 +3,13 @@ package vm
 import (
 	"fmt"
 
-	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/crypto"
-	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
 )
 
 type scalingCost struct {
@@ -89,6 +90,7 @@ type pricelistV0 struct {
 	computeUnsealedSectorCidBase int64
 	verifySealBase               int64
 	verifyPostLookup             map[abi.RegisteredPoStProof]scalingCost
+	verifyPostDiscount           bool
 	verifyConsensusFault         int64
 }
 
@@ -112,14 +114,14 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M
 	if big.Cmp(value, abi.NewTokenAmount(0)) != 0 {
 		ret += pl.sendTransferFunds
 
-		if methodNum == builtin0.MethodSend {
+		if methodNum == builtin.MethodSend {
 			// transfer only
 			ret += pl.sendTransferOnlyPremium
 		}
 		extra += "t"
 	}
 
-	if methodNum != builtin0.MethodSend {
+	if methodNum != builtin.MethodSend {
 		extra += "i"
 		// running actors is cheaper becase we hand over to actors
 		ret += pl.sendInvokeMethod
@@ -175,14 +177,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr
 }
 
 // OnVerifySeal
-func (pl *pricelistV0) OnVerifySeal(info proof.SealVerifyInfo) GasCharge {
+func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge {
 	// TODO: this needs more cost tunning, check with @lotus
 	// this is not used
 	return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
 }
 
 // OnVerifyPost
-func (pl *pricelistV0) OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge {
+func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge {
 	sectorSize := "unknown"
 	var proofType abi.RegisteredPoStProof
 
@@ -200,7 +202,9 @@ func (pl *pricelistV0) OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge {
 	}
 
 	gasUsed := cost.flat + int64(len(info.ChallengedSectors))*cost.scale
-	gasUsed /= 2 // XXX: this is an artificial discount
+	if pl.verifyPostDiscount {
+		gasUsed /= 2 // XXX: this is an artificial discount
+ } return newGasCharge("OnVerifyPost", gasUsed, 0). WithExtra(map[string]interface{}{ diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index 4005dd42f..bce385b02 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -5,16 +5,17 @@ import ( "io" "testing" - "github.com/filecoin-project/go-state-types/abi" - cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/assert" cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" + + runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/specs-actors/actors/runtime" ) type basicContract struct{} @@ -61,17 +62,17 @@ func (b basicContract) Exports() []interface{} { } } -func (basicContract) InvokeSomething0(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { +func (basicContract) InvokeSomething0(rt runtime2.Runtime, params *basicParams) *abi.EmptyValue { rt.Abortf(exitcode.ExitCode(params.B), "params.B") return nil } -func (basicContract) BadParam(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { +func (basicContract) BadParam(rt runtime2.Runtime, params *basicParams) *abi.EmptyValue { rt.Abortf(255, "bad params") return nil } -func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { +func (basicContract) InvokeSomething10(rt runtime2.Runtime, params *basicParams) *abi.EmptyValue { rt.Abortf(exitcode.ExitCode(params.B+10), "params.B") return nil } diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index 22a2acb8b..885d3c0db 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -15,6 +15,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" "github.com/filecoin-project/lotus/chain/types" ) @@ -56,7 +58,7 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add } // call constructor on account - _, aerr = rt.internalSend(builtin0.SystemActorAddr, addrID, builtin0.MethodsAccount.Constructor, big.Zero(), p) + _, aerr = rt.internalSend(builtin.SystemActorAddr, addrID, account.Methods.Constructor, big.Zero(), p) if aerr != nil { return nil, address.Undef, aerrors.Wrap(aerr, "failed to invoke account constructor") } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 8f124247c..6e36e8e87 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -53,8 +53,8 @@ func (m *Message) ValueReceived() abi.TokenAmount { var EnableGasTracing = false type Runtime struct { - rt0.Message - rt0.Syscalls + rt2.Message + rt2.Syscalls ctx context.Context @@ -72,6 +72,7 @@ type Runtime struct { originNonce uint64 executionTrace types.ExecutionTrace + depth uint64 numActorsCreated uint64 allowInternal bool callerValidated bool diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index a7f5dab0c..d2f1f77d3 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,8 +7,6 @@ import ( goruntime "runtime" "sync" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -23,7 +21,9 @@ import ( "github.com/filecoin-project/lotus/chain/state" 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/specs-actors/actors/runtime" + + runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" ) @@ -34,15 +34,18 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime2.Syscalls { + return &syscallShim{ ctx: ctx, - cstate: cstate, - cst: cst, + actor: rt.Receiver(), + cstate: rt.state, + cst: rt.cst, + lbState: rt.vm.lbStateGet, verifier: verifier, } @@ -52,6 +55,8 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { type syscallShim struct { ctx context.Context + lbState LookbackStateGetter + actor address.Address cstate *state.StateTree cst cbor.IpldStore verifier ffiwrapper.Verifier @@ -79,7 +84,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -115,14 +120,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen } // (2) check for the consensus faults themselves - var consensusFault *runtime.ConsensusFault + var consensusFault *runtime2.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime.ConsensusFault{ + consensusFault = &runtime2.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime.ConsensusFaultDoubleForkMining, + Type: runtime2.ConsensusFaultDoubleForkMining, } } @@ -130,10 +135,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. 
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime.ConsensusFault{ + consensusFault = &runtime2.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime.ConsensusFaultTimeOffsetMining, + Type: runtime2.ConsensusFaultTimeOffsetMining, } } @@ -153,10 +158,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime.ConsensusFault{ + consensusFault = &runtime2.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime.ConsensusFaultParentGrinding, + Type: runtime2.ConsensusFaultParentGrinding, } } } @@ -184,26 +189,7 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen } func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { - - // get appropriate miner actor - act, err := ss.cstate.GetActor(blk.Miner) - if err != nil { - return err - } - - // use that to get the miner state - mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act) - if err != nil { - return err - } - - info, err := mas.Info() - if err != nil { - return err - } - - // and use to get resolved workerKey - waddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) + waddr, err := ss.workerKeyAtLookback(blk.Height) if err != nil { return err } @@ -215,7 +201,32 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { return nil } -func (ss *syscallShim) VerifyPoSt(proof proof.WindowPoStVerifyInfo) error { +func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) { + lbState, err := ss.lbState(ss.ctx, height) + if err != nil { + return address.Undef, err + } + // get appropriate miner actor + act, err := lbState.GetActor(ss.actor) + if err != nil { + return address.Undef, err + } + + // use that to get the miner state + mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act) + if err != nil { + return address.Undef, err + } + + info, err := mas.Info() + if err != nil { + return address.Undef, err + } + + return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) +} + +func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) if err != nil { return err @@ -226,7 +237,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof.WindowPoStVerifyInfo) error { return nil } -func (ss *syscallShim) VerifySeal(info proof.SealVerifyInfo) error { +func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { //_, span := trace.StartSpan(ctx, "ValidatePoRep") //defer span.End() @@ -266,7 +277,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres var BatchSealVerifyParallelism = goruntime.NumCPU() -func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { out := make(map[address.Address][]bool) sema := make(chan struct{}, BatchSealVerifyParallelism) @@ -278,7 +289,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof.SealVeri for i, s := range seals { wg.Add(1) - go func(ma address.Address, ix int, svi proof.SealVerifyInfo, res []bool) { + go func(ma 
address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) { defer wg.Done() sema <- struct{}{} diff --git a/chain/vm/vm.go b/chain/vm/vm.go index a4efccb29..8b7f78074 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -38,6 +38,8 @@ import ( "github.com/filecoin-project/lotus/lib/bufbstore" ) +const MaxCallDepth = 4096 + var log = logging.Logger("vm") var actorLog = logging.Logger("actors") var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) @@ -97,33 +99,41 @@ func (bs *gasChargingBlocks) Put(blk block.Block) error { return nil } -func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin address.Address, originNonce uint64, usedGas int64, nac uint64) *Runtime { +func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { rt := &Runtime{ ctx: ctx, vm: vm, state: vm.cstate, - origin: origin, - originNonce: originNonce, + origin: msg.From, + originNonce: msg.Nonce, height: vm.blockHeight, - gasUsed: usedGas, + gasUsed: 0, gasAvailable: msg.GasLimit, - numActorsCreated: nac, + depth: 0, + numActorsCreated: 0, pricelist: PricelistByEpoch(vm.blockHeight), allowInternal: true, callerValidated: false, executionTrace: types.ExecutionTrace{Msg: msg}, } + if parent != nil { + rt.gasUsed = parent.gasUsed + rt.origin = parent.origin + rt.originNonce = parent.originNonce + rt.numActorsCreated = parent.numActorsCreated + rt.depth = parent.depth + 1 + } + + if rt.depth > MaxCallDepth && rt.NetworkVersion() >= network.Version6 { + rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth") + } + rt.cst = &cbor.BasicIpldStore{ Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}, Atlas: vm.cst.Atlas, } - rt.Syscalls = pricedSyscalls{ - under: vm.Syscalls(ctx, vm.cstate, rt.cst), - chargeGas: rt.chargeGasFunc(1), - pl: rt.pricelist, - } vmm := *msg resF, ok := rt.ResolveAddress(msg.From) @@ -141,6 +151,12 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres rt.Message = &Message{msg: vmm} } + rt.Syscalls = pricedSyscalls{ + under: vm.Syscalls(ctx, rt), + chargeGas: rt.chargeGasFunc(1), + pl: rt.pricelist, + } + return rt } @@ -148,12 +164,13 @@ type UnsafeVM struct { VM *VM } -func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message, origin address.Address, originNonce uint64, usedGas int64, nac uint64) *Runtime { - return vm.VM.makeRuntime(ctx, msg, origin, originNonce, usedGas, nac) +func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime { + return vm.VM.makeRuntime(ctx, msg, nil) } type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version +type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) type VM struct { cstate *state.StateTree @@ -166,6 +183,7 @@ type VM struct { circSupplyCalc CircSupplyCalculator ntwkVersion NtwkVersionGetter baseFee abi.TokenAmount + lbStateGet LookbackStateGetter Syscalls SyscallBuilder } @@ -179,6 +197,7 @@ type VMOpts struct { CircSupplyCalc CircSupplyCalculator NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? 
It seems like even when creating new networks we want to use the 'global'/build-default version getter BaseFee abi.TokenAmount + LookbackState LookbackStateGetter } func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { @@ -201,6 +220,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { ntwkVersion: opts.NtwkVersion, Syscalls: opts.Syscalls, baseFee: opts.BaseFee, + lbStateGet: opts.LookbackState, }, nil } @@ -214,7 +234,7 @@ type ApplyRet struct { ActorErr aerrors.ActorError ExecutionTrace types.ExecutionTrace Duration time.Duration - GasCosts GasOutputs + GasCosts *GasOutputs } func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, @@ -224,18 +244,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, st := vm.cstate - origin := msg.From - on := msg.Nonce - var nac uint64 = 0 - var gasUsed int64 - if parent != nil { - gasUsed = parent.gasUsed - origin = parent.origin - on = parent.originNonce - nac = parent.numActorsCreated - } - - rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac) + rt := vm.makeRuntime(ctx, msg, parent) if EnableGasTracing { rt.lastGasChargeTime = start if parent != nil { @@ -361,7 +370,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap }, ActorErr: actorErr, ExecutionTrace: rt.executionTrace, - GasCosts: GasOutputs{}, + GasCosts: nil, Duration: time.Since(start), }, actorErr } @@ -397,7 +406,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, ExitCode: exitcode.SysErrOutOfGas, GasUsed: 0, }, - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } @@ -417,7 +426,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, GasUsed: 0, }, ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "actor not found: %s", msg.From), - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } @@ -434,7 +443,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, GasUsed: 0, }, ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "send from not account actor: %s", fromActor.Code), - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } @@ -450,7 +459,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, "actor nonce invalid: msg:%d != state:%d", msg.Nonce, fromActor.Nonce), - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } @@ -466,7 +475,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, "actor balance less than needed: %s < %s", types.FIL(fromActor.Balance), types.FIL(gascost)), - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } @@ -560,7 +569,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, ActorErr: actorErr, ExecutionTrace: rt.executionTrace, - GasCosts: gasOutputs, + GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } diff --git a/chain/wallet/key.go b/chain/wallet/key.go index 4b746a17a..1b191cc4b 100644 --- a/chain/wallet/key.go +++ b/chain/wallet/key.go @@ -10,13 +10,17 @@ import ( "github.com/filecoin-project/lotus/lib/sigs" ) -func GenerateKey(typ crypto.SigType) (*Key, error) { - pk, err := sigs.Generate(typ) +func GenerateKey(typ types.KeyType) (*Key, error) { + ctyp := ActSigType(typ) 
+	if ctyp == crypto.SigTypeUnknown {
+		return nil, xerrors.Errorf("unknown sig type: %s", typ)
+	}
+	pk, err := sigs.Generate(ctyp)
 	if err != nil {
 		return nil, err
 	}
 	ki := types.KeyInfo{
-		Type:       kstoreSigType(typ),
+		Type:       typ,
 		PrivateKey: pk,
 	}
 	return NewKey(ki)
@@ -41,41 +45,30 @@ func NewKey(keyinfo types.KeyInfo) (*Key, error) {
 	}
 
 	switch k.Type {
-	case KTSecp256k1:
+	case types.KTSecp256k1:
 		k.Address, err = address.NewSecp256k1Address(k.PublicKey)
 		if err != nil {
 			return nil, xerrors.Errorf("converting Secp256k1 to address: %w", err)
 		}
-	case KTBLS:
+	case types.KTBLS:
 		k.Address, err = address.NewBLSAddress(k.PublicKey)
 		if err != nil {
 			return nil, xerrors.Errorf("converting BLS to address: %w", err)
 		}
 	default:
-		return nil, xerrors.Errorf("unknown key type")
+		return nil, xerrors.Errorf("unsupported key type: %s", k.Type)
 	}
 
 	return k, nil
 }
 
-func kstoreSigType(typ crypto.SigType) string {
+func ActSigType(typ types.KeyType) crypto.SigType {
 	switch typ {
-	case crypto.SigTypeBLS:
-		return KTBLS
-	case crypto.SigTypeSecp256k1:
-		return KTSecp256k1
-	default:
-		return ""
-	}
-}
-
-func ActSigType(typ string) crypto.SigType {
-	switch typ {
-	case KTBLS:
+	case types.KTBLS:
 		return crypto.SigTypeBLS
-	case KTSecp256k1:
+	case types.KTSecp256k1:
 		return crypto.SigTypeSecp256k1
 	default:
-		return 0
+		return crypto.SigTypeUnknown
 	}
 }
diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go
new file mode 100644
index 000000000..07f92e7ff
--- /dev/null
+++ b/chain/wallet/ledger/ledger.go
@@ -0,0 +1,242 @@
+package ledgerwallet
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/query"
+	logging "github.com/ipfs/go-log"
+	ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/crypto"
+
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+var log = logging.Logger("wallet-ledger")
+
+type LedgerWallet struct {
+	ds datastore.Datastore
+}
+
+func NewWallet(ds dtypes.MetadataDS) *LedgerWallet {
+	return &LedgerWallet{ds}
+}
+
+type LedgerKeyInfo struct {
+	Address address.Address
+	Path    []uint32
+}
+
+var _ api.WalletAPI = (*LedgerWallet)(nil)
+
+func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) {
+	ki, err := lw.getKeyInfo(signer)
+	if err != nil {
+		return nil, err
+	}
+
+	fl, err := ledgerfil.FindLedgerFilecoinApp()
+	if err != nil {
+		return nil, err
+	}
+	defer fl.Close() // nolint:errcheck
+	if meta.Type != api.MTChainMsg {
+		return nil, fmt.Errorf("ledger can only sign chain messages")
+	}
+
+	{
+		var cmsg types.Message
+		if err := cmsg.UnmarshalCBOR(bytes.NewReader(meta.Extra)); err != nil {
+			return nil, xerrors.Errorf("unmarshalling message: %w", err)
+		}
+
+		_, bc, err := cid.CidFromBytes(toSign)
+		if err != nil {
+			return nil, xerrors.Errorf("getting cid from signing bytes: %w", err)
+		}
+
+		if !cmsg.Cid().Equals(bc) {
+			return nil, xerrors.Errorf("cid(meta.Extra).bytes() != toSign")
+		}
+	}
+
+	sig, err := fl.SignSECP256K1(ki.Path, meta.Extra)
+	if err != nil {
+		return nil, err
+	}
+
+	return &crypto.Signature{
+		Type: crypto.SigTypeSecp256k1,
+		Data: sig.SignatureBytes(),
+	}, nil
+}
+
+func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) {
+ kib, err := lw.ds.Get(keyForAddr(addr)) + if err != nil { + return nil, err + } + + var out LedgerKeyInfo + if err := json.Unmarshal(kib, &out); err != nil { + return nil, xerrors.Errorf("unmarshalling ledger key info: %w", err) + } + + return &out, nil +} + +func (lw LedgerWallet) WalletDelete(ctx context.Context, k address.Address) error { + return lw.ds.Delete(keyForAddr(k)) +} + +func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*types.KeyInfo, error) { + return nil, fmt.Errorf("cannot export keys from ledger wallets") +} + +func (lw LedgerWallet) WalletHas(ctx context.Context, k address.Address) (bool, error) { + _, err := lw.ds.Get(keyForAddr(k)) + if err == nil { + return true, nil + } + if err == datastore.ErrNotFound { + return false, nil + } + return false, err +} + +func (lw LedgerWallet) WalletImport(ctx context.Context, kinfo *types.KeyInfo) (address.Address, error) { + var ki LedgerKeyInfo + if err := json.Unmarshal(kinfo.PrivateKey, &ki); err != nil { + return address.Undef, err + } + return lw.importKey(ki) +} + +func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { + if ki.Address == address.Undef { + return address.Undef, fmt.Errorf("no address given in imported key info") + } + if len(ki.Path) != filHdPathLen { + return address.Undef, fmt.Errorf("bad hd path len: %d, expected: %d", len(ki.Path), filHdPathLen) + } + bb, err := json.Marshal(ki) + if err != nil { + return address.Undef, xerrors.Errorf("marshaling key info: %w", err) + } + + if err := lw.ds.Put(keyForAddr(ki.Address), bb); err != nil { + return address.Undef, err + } + + return ki.Address, nil +} + +func (lw LedgerWallet) WalletList(ctx context.Context) ([]address.Address, error) { + res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + if err != nil { + return nil, err + } + defer res.Close() // nolint:errcheck + + var out []address.Address + for { + res, ok := res.NextSync() + if !ok { + break + } + + var ki LedgerKeyInfo + if err := json.Unmarshal(res.Value, &ki); err != nil { + return nil, err + } + + out = append(out, ki.Address) + } + return out, nil +} + +const hdHard = 0x80000000 + +var filHDBasePath = []uint32{hdHard | 44, hdHard | 461, hdHard, 0} +var filHdPathLen = 5 + +func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address.Address, error) { + if t != types.KTSecp256k1Ledger { + return address.Undef, fmt.Errorf("unsupported key type: '%s', only '%s' supported", + t, types.KTSecp256k1Ledger) + } + + res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + if err != nil { + return address.Undef, err + } + defer res.Close() // nolint:errcheck + + var maxi int64 = -1 + for { + res, ok := res.NextSync() + if !ok { + break + } + + var ki LedgerKeyInfo + if err := json.Unmarshal(res.Value, &ki); err != nil { + return address.Undef, err + } + if i := ki.Path[filHdPathLen-1]; maxi == -1 || maxi < int64(i) { + maxi = int64(i) + } + } + + fl, err := ledgerfil.FindLedgerFilecoinApp() + if err != nil { + return address.Undef, xerrors.Errorf("finding ledger: %w", err) + } + defer fl.Close() // nolint:errcheck + + path := append(append([]uint32(nil), filHDBasePath...), uint32(maxi+1)) + _, _, addr, err := fl.GetAddressPubKeySECP256K1(path) + if err != nil { + return address.Undef, xerrors.Errorf("getting public key from ledger: %w", err) + } + + log.Warnf("creating key: %s, accept the key in ledger device", addr) + _, _, addr, err = fl.ShowAddressPubKeySECP256K1(path) + if err != nil { + return address.Undef, 
xerrors.Errorf("verifying public key with ledger: %w", err) + } + + a, err := address.NewFromString(addr) + if err != nil { + return address.Undef, fmt.Errorf("parsing address: %w", err) + } + + var lki LedgerKeyInfo + lki.Address = a + lki.Path = path + + return lw.importKey(lki) +} + +func (lw *LedgerWallet) Get() api.WalletAPI { + if lw == nil { + return nil + } + + return lw +} + +var dsLedgerPrefix = "/ledgerkey/" + +func keyForAddr(addr address.Address) datastore.Key { + return datastore.NewKey(dsLedgerPrefix + addr.String()) +} diff --git a/chain/wallet/multi.go b/chain/wallet/multi.go new file mode 100644 index 000000000..532ad217b --- /dev/null +++ b/chain/wallet/multi.go @@ -0,0 +1,170 @@ +package wallet + +import ( + "context" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" + "github.com/filecoin-project/lotus/chain/wallet/remotewallet" +) + +type MultiWallet struct { + fx.In // "constructed" with fx.In instead of normal constructor + + Local *LocalWallet `optional:"true"` + Remote *remotewallet.RemoteWallet `optional:"true"` + Ledger *ledgerwallet.LedgerWallet `optional:"true"` +} + +type getif interface { + api.WalletAPI + + // workaround for the fact that iface(*struct(nil)) != nil + Get() api.WalletAPI +} + +func firstNonNil(wallets ...getif) api.WalletAPI { + for _, w := range wallets { + if w.Get() != nil { + return w + } + } + + return nil +} + +func nonNil(wallets ...getif) []api.WalletAPI { + var out []api.WalletAPI + for _, w := range wallets { + if w.Get() == nil { + continue + } + + out = append(out, w) + } + + return out +} + +func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.WalletAPI, error) { + ws := nonNil(wallets...) 
+ + for _, w := range ws { + have, err := w.WalletHas(ctx, address) + if err != nil { + return nil, err + } + + if have { + return w, nil + } + } + + return nil, nil +} + +func (m MultiWallet) WalletNew(ctx context.Context, keyType types.KeyType) (address.Address, error) { + var local getif = m.Local + if keyType == types.KTSecp256k1Ledger { + local = m.Ledger + } + + w := firstNonNil(m.Remote, local) + if w == nil { + return address.Undef, xerrors.Errorf("no wallet backends supporting key type: %s", keyType) + } + + return w.WalletNew(ctx, keyType) +} + +func (m MultiWallet) WalletHas(ctx context.Context, address address.Address) (bool, error) { + w, err := m.find(ctx, address, m.Remote, m.Ledger, m.Local) + return w != nil, err +} + +func (m MultiWallet) WalletList(ctx context.Context) ([]address.Address, error) { + var out []address.Address + seen := map[address.Address]struct{}{} + + ws := nonNil(m.Remote, m.Ledger, m.Local) + for _, w := range ws { + l, err := w.WalletList(ctx) + if err != nil { + return nil, err + } + + for _, a := range l { + if _, ok := seen[a]; ok { + continue + } + seen[a] = struct{}{} + + out = append(out, a) + } + } + + return out, nil +} + +func (m MultiWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) { + w, err := m.find(ctx, signer, m.Remote, m.Ledger, m.Local) + if err != nil { + return nil, err + } + if w == nil { + return nil, xerrors.Errorf("key not found") + } + + return w.WalletSign(ctx, signer, toSign, meta) +} + +func (m MultiWallet) WalletExport(ctx context.Context, address address.Address) (*types.KeyInfo, error) { + w, err := m.find(ctx, address, m.Remote, m.Local) + if err != nil { + return nil, err + } + if w == nil { + return nil, xerrors.Errorf("key not found") + } + + return w.WalletExport(ctx, address) +} + +func (m MultiWallet) WalletImport(ctx context.Context, info *types.KeyInfo) (address.Address, error) { + var local getif = m.Local + if info.Type == types.KTSecp256k1Ledger { + local = m.Ledger + } + + w := firstNonNil(m.Remote, local) + if w == nil { + return address.Undef, xerrors.Errorf("no wallet backends configured") + } + + return w.WalletImport(ctx, info) +} + +func (m MultiWallet) WalletDelete(ctx context.Context, address address.Address) error { + for { + w, err := m.find(ctx, address, m.Remote, m.Ledger, m.Local) + if err != nil { + return err + } + if w == nil { + return nil + } + + if err := w.WalletDelete(ctx, address); err != nil { + return err + } + } +} + +var _ api.WalletAPI = MultiWallet{} diff --git a/chain/wallet/remotewallet/remote.go b/chain/wallet/remotewallet/remote.go index c7192f496..aa4427132 100644 --- a/chain/wallet/remotewallet/remote.go +++ b/chain/wallet/remotewallet/remote.go @@ -40,3 +40,11 @@ func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycl return &RemoteWallet{wapi}, nil } } + +func (w *RemoteWallet) Get() api.WalletAPI { + if w == nil { + return nil + } + + return w +} diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go index 57e63625b..33fa3135e 100644 --- a/chain/wallet/wallet.go +++ b/chain/wallet/wallet.go @@ -26,8 +26,6 @@ const ( KNamePrefix = "wallet-" KTrashPrefix = "trash-" KDefault = "default" - KTBLS = "bls" - KTSecp256k1 = "secp256k1" ) type LocalWallet struct { @@ -236,7 +234,7 @@ func (w *LocalWallet) SetDefault(a address.Address) error { return nil } -func (w *LocalWallet) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { +func (w *LocalWallet) 
WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { w.lk.Lock() defer w.lk.Unlock() @@ -274,6 +272,7 @@ func (w *LocalWallet) WalletHas(ctx context.Context, addr address.Address) (bool func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error { k, err := w.findKey(addr) + if err != nil { return xerrors.Errorf("failed to delete key %s : %w", addr, err) } @@ -281,6 +280,13 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er return nil // already not there } + w.lk.Lock() + defer w.lk.Unlock() + + if err := w.keystore.Delete(KTrashPrefix + k.Address.String()); err != nil && !xerrors.Is(err, types.ErrKeyInfoNotFound) { + return xerrors.Errorf("failed to purge trashed key %s: %w", addr, err) + } + if err := w.keystore.Put(KTrashPrefix+k.Address.String(), k.KeyInfo); err != nil { return xerrors.Errorf("failed to mark key %s as trashed: %w", addr, err) } @@ -297,9 +303,19 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er // TODO: Does this always error in the not-found case? Just ignoring an error return for now. _ = w.keystore.Delete(KNamePrefix + tAddr) + delete(w.keys, addr) + return nil } +func (w *LocalWallet) Get() api.WalletAPI { + if w == nil { + return nil + } + + return w +} + var _ api.WalletAPI = &LocalWallet{} func swapMainnetForTestnetPrefix(addr string) (string, error) { @@ -312,3 +328,16 @@ func swapMainnetForTestnetPrefix(addr string) (string, error) { aChars[0] = prefixRunes[0] return string(aChars), nil } + +type nilDefault struct{} + +func (n nilDefault) GetDefault() (address.Address, error) { + return address.Undef, nil +} + +func (n nilDefault) SetDefault(a address.Address) error { + return xerrors.Errorf("not supported; local wallet disabled") +} + +var NilDefault nilDefault +var _ Default = NilDefault diff --git a/cli/chain.go b/cli/chain.go index 763752f23..e2d0ebb4a 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -3,6 +3,7 @@ package cli import ( "bytes" "context" + "encoding/hex" "encoding/json" "fmt" "os" @@ -53,6 +54,7 @@ var chainCmd = &cli.Command{ slashConsensusFault, chainGasPriceCmd, chainInspectUsage, + chainDecodeCmd, }, } @@ -521,8 +523,9 @@ var chainInspectUsage = &cli.Command{ } var chainListCmd = &cli.Command{ - Name: "list", - Usage: "View a segment of the chain", + Name: "list", + Aliases: []string{"love"}, + Usage: "View a segment of the chain", Flags: []cli.Flag{ &cli.Uint64Flag{Name: "height"}, &cli.IntFlag{Name: "count", Value: 30}, @@ -1233,3 +1236,68 @@ var chainGasPriceCmd = &cli.Command{ return nil }, } + +var chainDecodeCmd = &cli.Command{ + Name: "decode", + Usage: "decode various types", + Subcommands: []*cli.Command{ + chainDecodeParamsCmd, + }, +} + +var chainDecodeParamsCmd = &cli.Command{ + Name: "params", + Usage: "Decode message params", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + }, + }, + ArgsUsage: "[toAddr method hexParams]", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + if cctx.Args().Len() != 3 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + to, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing toAddr: %w", err) + } + + method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("parsing method id: %w", err) + } + + params, err := 
hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("parsing hex params: %w", err) + } + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + act, err := api.StateGetActor(ctx, to, ts.Key()) + if err != nil { + return xerrors.Errorf("getting actor: %w", err) + } + + pstr, err := JsonParams(act.Code, abi.MethodNum(method), params) + if err != nil { + return err + } + + fmt.Println(pstr) + + return nil + }, +} diff --git a/cli/client.go b/cli/client.go index 34d151ace..07e3cb2c8 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1,23 +1,29 @@ package cli import ( + "bufio" "context" "encoding/json" + "errors" "fmt" "io" + "math/rand" "os" "path/filepath" "sort" "strconv" + "strings" + "sync" + "sync/atomic" "text/tabwriter" "time" tm "github.com/buger/goterm" + "github.com/chzyer/readline" "github.com/docker/go-units" "github.com/fatih/color" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" "github.com/libp2p/go-libp2p-core/peer" @@ -34,6 +40,7 @@ import ( "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/tablewriter" @@ -73,15 +80,19 @@ var clientCmd = &cli.Command{ WithCategory("storage", clientQueryAskCmd), WithCategory("storage", clientListDeals), WithCategory("storage", clientGetDealCmd), + WithCategory("storage", clientListAsksCmd), WithCategory("data", clientImportCmd), WithCategory("data", clientDropCmd), WithCategory("data", clientLocalCmd), + WithCategory("data", clientStat), WithCategory("retrieval", clientFindCmd), WithCategory("retrieval", clientRetrieveCmd), WithCategory("util", clientCommPCmd), WithCategory("util", clientCarGenCmd), WithCategory("util", clientInfoCmd), WithCategory("util", clientListTransfers), + WithCategory("util", clientRestartTransfer), + WithCategory("util", clientCancelTransfer), }, } @@ -334,6 +345,7 @@ var clientDealCmd = &cli.Command{ } defer closer() ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) if cctx.NArg() != 4 { return xerrors.New("expected 4 args: dataCid, miner, price, duration") @@ -453,7 +465,7 @@ var clientDealCmd = &cli.Command{ return err } - fmt.Println(encoder.Encode(*proposal)) + afmt.Println(encoder.Encode(*proposal)) return nil }, @@ -466,16 +478,27 @@ func interactiveDeal(cctx *cli.Context) error { } defer closer() ctx := ReqContext(cctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + afmt := NewAppFmt(cctx.App) state := "import" + gib := types.NewInt(1 << 30) var data cid.Cid var days int - var maddr address.Address - var ask storagemarket.StorageAsk - var epochPrice big.Int + var maddrs []address.Address + var ask []storagemarket.StorageAsk + var epochPrices []big.Int + var dur time.Duration var epochs abi.ChainEpoch var verified bool + var ds lapi.DataCIDSize + + // find + var candidateAsks []*storagemarket.StorageAsk + var budget types.FIL + var dealCount int64 var a address.Address if from := cctx.String("from"); from != "" { @@ -492,10 +515,24 @@ func interactiveDeal(cctx *cli.Context) error { a = def } - printErr := func(err error) { - fmt.Printf("%s %s\n", 
color.RedString("Error:"), err.Error()) + fromBal, err := api.WalletBalance(ctx, a) + if err != nil { + return xerrors.Errorf("checking from address balance: %w", err) } + printErr := func(err error) { + afmt.Printf("%s %s\n", color.RedString("Error:"), err.Error()) + } + + cs := readline.NewCancelableStdin(afmt.Stdin) + go func() { + <-ctx.Done() + cs.Close() // nolint:errcheck + }() + + rl := bufio.NewReader(cs) + +uiLoop: for { // TODO: better exit handling if err := ctx.Err(); err != nil { @@ -504,10 +541,10 @@ func interactiveDeal(cctx *cli.Context) error { switch state { case "import": - fmt.Print("Data CID (from " + color.YellowString("lotus client import") + "): ") + afmt.Print("Data CID (from " + color.YellowString("lotus client import") + "): ") - var cidStr string - _, err := fmt.Scan(&cidStr) + _cidStr, _, err := rl.ReadLine() + cidStr := string(_cidStr) if err != nil { printErr(xerrors.Errorf("reading cid string: %w", err)) continue @@ -519,11 +556,23 @@ func interactiveDeal(cctx *cli.Context) error { continue } + color.Blue(".. calculating data size\n") + ds, err = api.ClientDealPieceCID(ctx, data) + if err != nil { + return err + } + state = "duration" case "duration": - fmt.Print("Deal duration (days): ") + afmt.Print("Deal duration (days): ") - _, err := fmt.Scan(&days) + _daystr, _, err := rl.ReadLine() + daystr := string(_daystr) + if err != nil { + return err + } + + _, err = fmt.Sscan(daystr, &days) if err != nil { printErr(xerrors.Errorf("parsing duration: %w", err)) continue @@ -534,44 +583,9 @@ func interactiveDeal(cctx *cli.Context) error { continue } - state = "miner" - case "miner": - fmt.Print("Miner Address (f0..): ") - var maddrStr string + dur = 24 * time.Hour * time.Duration(days) + epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second)) - _, err := fmt.Scan(&maddrStr) - if err != nil { - printErr(xerrors.Errorf("reading miner address: %w", err)) - continue - } - - maddr, err = address.NewFromString(maddrStr) - if err != nil { - printErr(xerrors.Errorf("parsing miner address: %w", err)) - continue - } - - state = "query" - case "query": - color.Blue(".. querying miner ask") - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - printErr(xerrors.Errorf("failed to get peerID for miner: %w", err)) - state = "miner" - continue - } - - a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr) - if err != nil { - printErr(xerrors.Errorf("failed to query ask: %w", err)) - state = "miner" - continue - } - - ask = *a - - // TODO: run more validation state = "verified" case "verified": ts, err := api.ChainHead(ctx) @@ -585,26 +599,20 @@ func interactiveDeal(cctx *cli.Context) error { } if dcap == nil { - state = "confirm" + state = "miner" continue } - color.Blue(".. checking verified deal eligibility\n") - ds, err := api.ClientDealSize(ctx, data) - if err != nil { - return err - } - if dcap.Uint64() < uint64(ds.PieceSize) { color.Yellow(".. not enough DataCap available for a verified deal\n") - state = "confirm" + state = "miner" continue } - fmt.Print("\nMake this a verified deal? (yes/no): ") + afmt.Print("\nMake this a verified deal? 
(yes/no): ") - var yn string - _, err = fmt.Scan(&yn) + _yn, _, err := rl.ReadLine() + yn := string(_yn) if err != nil { return err } @@ -615,54 +623,208 @@ func interactiveDeal(cctx *cli.Context) error { case "no": verified = false default: - fmt.Println("Type in full 'yes' or 'no'") + afmt.Println("Type in full 'yes' or 'no'") continue } - state = "confirm" - case "confirm": - fromBal, err := api.WalletBalance(ctx, a) + state = "miner" + case "miner": + afmt.Print("Miner Addresses (f0.. f0..), none to find: ") + + _maddrsStr, _, err := rl.ReadLine() + maddrsStr := string(_maddrsStr) if err != nil { - return xerrors.Errorf("checking from address balance: %w", err) + printErr(xerrors.Errorf("reading miner address: %w", err)) + continue } - color.Blue(".. calculating data size\n") - ds, err := api.ClientDealSize(ctx, data) + for _, s := range strings.Fields(maddrsStr) { + maddr, err := address.NewFromString(strings.TrimSpace(s)) + if err != nil { + printErr(xerrors.Errorf("parsing miner address: %w", err)) + continue uiLoop + } + + maddrs = append(maddrs, maddr) + } + + state = "query" + if len(maddrs) == 0 { + state = "find" + } + case "find": + asks, err := getAsks(ctx, api) if err != nil { return err } - dur := 24 * time.Hour * time.Duration(days) - - epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second)) - // TODO: do some more or epochs math (round to miner PP, deal start buffer) - - pricePerGib := ask.Price - if verified { - pricePerGib = ask.VerifiedPrice + for _, ask := range asks { + if ask.MinPieceSize > ds.PieceSize { + continue + } + if ask.MaxPieceSize < ds.PieceSize { + continue + } + candidateAsks = append(candidateAsks, ask) } - gib := types.NewInt(1 << 30) + afmt.Printf("Found %d candidate asks\n", len(candidateAsks)) + state = "find-budget" + case "find-budget": + afmt.Printf("Proposing from %s, Current Balance: %s\n", a, types.FIL(fromBal)) + afmt.Print("Maximum budget (FIL): ") // TODO: Propose some default somehow? + + _budgetStr, _, err := rl.ReadLine() + budgetStr := string(_budgetStr) + if err != nil { + printErr(xerrors.Errorf("reading miner address: %w", err)) + continue + } + + budget, err = types.ParseFIL(budgetStr) + if err != nil { + printErr(xerrors.Errorf("parsing FIL: %w", err)) + continue uiLoop + } + + var goodAsks []*storagemarket.StorageAsk + for _, ask := range candidateAsks { + p := ask.Price + if verified { + p = ask.VerifiedPrice + } + + epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) + totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) + + if totalPrice.LessThan(abi.TokenAmount(budget)) { + goodAsks = append(goodAsks, ask) + } + } + candidateAsks = goodAsks + afmt.Printf("%d asks within budget\n", len(candidateAsks)) + state = "find-count" + case "find-count": + afmt.Print("Deals to make (1): ") + dealcStr, _, err := rl.ReadLine() + if err != nil { + printErr(xerrors.Errorf("reading deal count: %w", err)) + continue + } + + dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64) + if err != nil { + return err + } + + color.Blue(".. 
Picking miners") + + // TODO: some better strategy (this tries to pick randomly) + var pickedAsks []*storagemarket.StorageAsk + pickLoop: + for i := 0; i < 64; i++ { + rand.Shuffle(len(candidateAsks), func(i, j int) { + candidateAsks[i], candidateAsks[j] = candidateAsks[j], candidateAsks[i] + }) + + remainingBudget := abi.TokenAmount(budget) + pickedAsks = []*storagemarket.StorageAsk{} + + for _, ask := range candidateAsks { + p := ask.Price + if verified { + p = ask.VerifiedPrice + } + + epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) + totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) + + if totalPrice.GreaterThan(remainingBudget) { + continue + } + + pickedAsks = append(pickedAsks, ask) + remainingBudget = big.Sub(remainingBudget, totalPrice) + + if len(pickedAsks) == int(dealCount) { + break pickLoop + } + } + } + + for _, pickedAsk := range pickedAsks { + maddrs = append(maddrs, pickedAsk.Miner) + ask = append(ask, *pickedAsk) + } + + state = "confirm" + case "query": + color.Blue(".. querying miner asks") + + for _, maddr := range maddrs { + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + printErr(xerrors.Errorf("failed to get peerID for miner: %w", err)) + state = "miner" + continue uiLoop + } + + a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr) + if err != nil { + printErr(xerrors.Errorf("failed to query ask: %w", err)) + state = "miner" + continue uiLoop + } + + ask = append(ask, *a) + } + + // TODO: run more validation + state = "confirm" + case "confirm": + // TODO: do some more or epochs math (round to miner PP, deal start buffer) + + afmt.Printf("-----\n") + afmt.Printf("Proposing from %s\n", a) + afmt.Printf("\tBalance: %s\n", types.FIL(fromBal)) + afmt.Printf("\n") + afmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize))) + afmt.Printf("Duration: %s\n", dur) + + pricePerGib := big.Zero() + for _, a := range ask { + p := a.Price + if verified { + p = a.VerifiedPrice + } + pricePerGib = big.Add(pricePerGib, p) + epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) + epochPrices = append(epochPrices, epochPrice) + + mpow, err := api.StateMinerPower(ctx, a.Miner, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting power (%s): %w", a.Miner, err) + } + + if len(ask) > 1 { + totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) + afmt.Printf("Miner %s (Power:%s) price: ~%s (%s per epoch)\n", color.YellowString(a.Miner.String()), color.GreenString(types.SizeStr(mpow.MinerPower.QualityAdjPower)), color.BlueString(types.FIL(totalPrice).String()), types.FIL(epochPrice)) + } + } // TODO: price is based on PaddedPieceSize, right? 
- epochPrice = types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib) + epochPrice := types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib) totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) - fmt.Printf("-----\n") - fmt.Printf("Proposing from %s\n", a) - fmt.Printf("\tBalance: %s\n", types.FIL(fromBal)) - fmt.Printf("\n") - fmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize))) - fmt.Printf("Duration: %s\n", dur) - fmt.Printf("Total price: ~%s (%s per epoch)\n", types.FIL(totalPrice), types.FIL(epochPrice)) - fmt.Printf("Verified: %v\n", verified) + afmt.Printf("Total price: ~%s (%s per epoch)\n", color.CyanString(types.FIL(totalPrice).String()), types.FIL(epochPrice)) + afmt.Printf("Verified: %v\n", verified) state = "accept" case "accept": - fmt.Print("\nAccept (yes/no): ") + afmt.Print("\nAccept (yes/no): ") - var yn string - _, err := fmt.Scan(&yn) + _yn, _, err := rl.ReadLine() + yn := string(_yn) if err != nil { return err } @@ -672,36 +834,43 @@ func interactiveDeal(cctx *cli.Context) error { } if yn != "yes" { - fmt.Println("Type in full 'yes' or 'no'") + afmt.Println("Type in full 'yes' or 'no'") continue } state = "execute" case "execute": - color.Blue(".. executing") - proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ - Data: &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: data, - }, - Wallet: a, - Miner: maddr, - EpochPrice: epochPrice, - MinBlocksDuration: uint64(epochs), - DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")), - FastRetrieval: cctx.Bool("fast-retrieval"), - VerifiedDeal: verified, - }) - if err != nil { - return err + color.Blue(".. 
executing\n") + + for i, maddr := range maddrs { + proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ + Data: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: data, + + PieceCid: &ds.PieceCID, + PieceSize: ds.PieceSize.Unpadded(), + }, + Wallet: a, + Miner: maddr, + EpochPrice: epochPrices[i], + MinBlocksDuration: uint64(epochs), + DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")), + FastRetrieval: cctx.Bool("fast-retrieval"), + VerifiedDeal: verified, + }) + if err != nil { + return err + } + + encoder, err := GetCidEncoder(cctx) + if err != nil { + return err + } + + afmt.Printf("Deal (%s) CID: %s\n", maddr, color.GreenString(encoder.Encode(*proposal))) } - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - fmt.Println("\nDeal CID:", color.GreenString(encoder.Encode(*proposal))) return nil default: return xerrors.Errorf("unknown state: %s", state) @@ -813,6 +982,7 @@ var clientRetrieveCmd = &cli.Command{ } defer closer() ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) var payer address.Address if cctx.String("from") != "" { @@ -921,14 +1091,14 @@ var clientRetrieveCmd = &cli.Command{ select { case evt, ok := <-updates: if ok { - fmt.Printf("> Recv: %s, Paid %s, %s (%s)\n", + afmt.Printf("> Recv: %s, Paid %s, %s (%s)\n", types.SizeStr(types.NewInt(evt.BytesReceived)), types.FIL(evt.FundsSpent), retrievalmarket.ClientEvents[evt.Event], retrievalmarket.DealStatuses[evt.Status], ) } else { - fmt.Println("Success") + afmt.Println("Success") return nil } @@ -942,6 +1112,152 @@ var clientRetrieveCmd = &cli.Command{ }, } +var clientListAsksCmd = &cli.Command{ + Name: "list-asks", + Usage: "List asks for top miners", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + asks, err := getAsks(ctx, api) + if err != nil { + return err + } + + for _, ask := range asks { + fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch\n", ask.Miner, + types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), + types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), + types.FIL(ask.Price), + types.FIL(ask.VerifiedPrice), + ) + } + + return nil + }, +} + +func getAsks(ctx context.Context, api lapi.FullNode) ([]*storagemarket.StorageAsk, error) { + color.Blue(".. getting miner list") + miners, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting miner list: %w", err) + } + + var lk sync.Mutex + var found int64 + var withMinPower []address.Address + done := make(chan struct{}) + + go func() { + defer close(done) + + var wg sync.WaitGroup + wg.Add(len(miners)) + + throttle := make(chan struct{}, 50) + for _, miner := range miners { + throttle <- struct{}{} + go func(miner address.Address) { + defer wg.Done() + defer func() { + <-throttle + }() + + power, err := api.StateMinerPower(ctx, miner, types.EmptyTSK) + if err != nil { + return + } + + if power.HasMinPower { // TODO: Lower threshold + atomic.AddInt64(&found, 1) + lk.Lock() + withMinPower = append(withMinPower, miner) + lk.Unlock() + } + }(miner) + } + }() + +loop: + for { + select { + case <-time.After(150 * time.Millisecond): + fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) + case <-done: + break loop + } + } + fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) + + color.Blue(".. 
querying asks") + + var asks []*storagemarket.StorageAsk + var queried, got int64 + + done = make(chan struct{}) + go func() { + defer close(done) + + var wg sync.WaitGroup + wg.Add(len(withMinPower)) + + throttle := make(chan struct{}, 50) + for _, miner := range withMinPower { + throttle <- struct{}{} + go func(miner address.Address) { + defer wg.Done() + defer func() { + <-throttle + atomic.AddInt64(&queried, 1) + }() + + ctx, cancel := context.WithTimeout(ctx, 4*time.Second) + defer cancel() + + mi, err := api.StateMinerInfo(ctx, miner, types.EmptyTSK) + if err != nil { + return + } + if mi.PeerId == nil { + return + } + + ask, err := api.ClientQueryAsk(ctx, *mi.PeerId, miner) + if err != nil { + return + } + + atomic.AddInt64(&got, 1) + lk.Lock() + asks = append(asks, ask) + lk.Unlock() + }(miner) + } + }() + +loop2: + for { + select { + case <-time.After(150 * time.Millisecond): + fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + case <-done: + break loop2 + } + } + fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + + sort.Slice(asks, func(i, j int) bool { + return asks[i].Price.LessThan(asks[j].Price) + }) + + return asks, nil +} + var clientQueryAskCmd = &cli.Command{ Name: "query-ask", Usage: "Find a miners ask", @@ -961,8 +1277,9 @@ var clientQueryAskCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) if cctx.NArg() != 1 { - fmt.Println("Usage: query-ask [minerAddress]") + afmt.Println("Usage: query-ask [minerAddress]") return nil } @@ -1003,23 +1320,23 @@ var clientQueryAskCmd = &cli.Command{ return err } - fmt.Printf("Ask: %s\n", maddr) - fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price)) - fmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice)) - fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize)))) + afmt.Printf("Ask: %s\n", maddr) + afmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price)) + afmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice)) + afmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize)))) size := cctx.Int64("size") if size == 0 { return nil } perEpoch := types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(size))), types.NewInt(1<<30)) - fmt.Printf("Price per Block: %s\n", types.FIL(perEpoch)) + afmt.Printf("Price per Block: %s\n", types.FIL(perEpoch)) duration := cctx.Int64("duration") if duration == 0 { return nil } - fmt.Printf("Total Price: %s\n", types.FIL(types.BigMul(perEpoch, types.NewInt(uint64(duration))))) + afmt.Printf("Total Price: %s\n", types.FIL(types.BigMul(perEpoch, types.NewInt(uint64(duration))))) return nil }, @@ -1102,7 +1419,7 @@ var clientListDeals = &cli.Command{ } } - return outputStorageDeals(ctx, os.Stdout, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"), showFailed) + return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"), showFailed) }, } @@ -1326,6 +1643,159 @@ var clientInfoCmd = &cli.Command{ }, } +var clientStat = &cli.Command{ + Name: "stat", + Usage: "Print information about a locally stored file (piece size, etc)", + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + if !cctx.Args().Present() || cctx.NArg() != 1 { + return fmt.Errorf("must specify cid of data") + } + + 
dataCid, err := cid.Parse(cctx.Args().First()) + if err != nil { + return fmt.Errorf("parsing data cid: %w", err) + } + + ds, err := api.ClientDealSize(ctx, dataCid) + if err != nil { + return err + } + + fmt.Printf("Piece Size : %v\n", ds.PieceSize) + fmt.Printf("Payload Size: %v\n", ds.PayloadSize) + + return nil + }, +} + +var clientRestartTransfer = &cli.Command{ + Name: "restart-transfer", + Usage: "Force restart a stalled data transfer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "peerid", + Usage: "narrow to transfer with specific peer", + }, + &cli.BoolFlag{ + Name: "initiator", + Usage: "specify only transfers where peer is/is not initiator", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) + if err != nil { + return fmt.Errorf("Error reading transfer ID: %w", err) + } + transferID := datatransfer.TransferID(transferUint) + initiator := cctx.Bool("initiator") + var other peer.ID + if pidstr := cctx.String("peerid"); pidstr != "" { + p, err := peer.Decode(pidstr) + if err != nil { + return err + } + other = p + } else { + channels, err := api.ClientListDataTransfers(ctx) + if err != nil { + return err + } + found := false + for _, channel := range channels { + if channel.IsInitiator == initiator && channel.TransferID == transferID { + other = channel.OtherPeer + found = true + break + } + } + if !found { + return errors.New("unable to find matching data transfer") + } + } + + return api.ClientRestartDataTransfer(ctx, transferID, other, initiator) + }, +} + +var clientCancelTransfer = &cli.Command{ + Name: "cancel-transfer", + Usage: "Force cancel a data transfer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "peerid", + Usage: "narrow to transfer with specific peer", + }, + &cli.BoolFlag{ + Name: "initiator", + Usage: "specify only transfers where peer is/is not initiator", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) + if err != nil { + return fmt.Errorf("Error reading transfer ID: %w", err) + } + transferID := datatransfer.TransferID(transferUint) + initiator := cctx.Bool("initiator") + var other peer.ID + if pidstr := cctx.String("peerid"); pidstr != "" { + p, err := peer.Decode(pidstr) + if err != nil { + return err + } + other = p + } else { + channels, err := api.ClientListDataTransfers(ctx) + if err != nil { + return err + } + found := false + for _, channel := range channels { + if channel.IsInitiator == initiator && channel.TransferID == transferID { + other = channel.OtherPeer + found = true + break + } + } + if !found { + return errors.New("unable to find matching data transfer") + } + } + + return api.ClientCancelDataTransfer(ctx, transferID, other, initiator) + }, +} + var clientListTransfers = &cli.Command{ Name: "list-transfers", Usage: "List ongoing data transfers for deals", @@ -1343,6 +1813,10 @@ var clientListTransfers = &cli.Command{ Name: "watch", Usage: "watch deal updates in real-time, rather than a one time list", }, + &cli.BoolFlag{ + Name: "show-failed", + 
Usage: "show failed/cancelled transfers", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -1360,7 +1834,7 @@ var clientListTransfers = &cli.Command{ completed := cctx.Bool("completed") color := cctx.Bool("color") watch := cctx.Bool("watch") - + showFailed := cctx.Bool("show-failed") if watch { channelUpdates, err := api.ClientDataTransferUpdates(ctx) if err != nil { @@ -1372,7 +1846,7 @@ var clientListTransfers = &cli.Command{ tm.MoveCursor(1, 1) - OutputDataTransferChannels(tm.Screen, channels, completed, color) + OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) tm.Flush() @@ -1397,13 +1871,13 @@ var clientListTransfers = &cli.Command{ } } } - OutputDataTransferChannels(os.Stdout, channels, completed, color) + OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) return nil }, } // OutputDataTransferChannels generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool) { +func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) { sort.Slice(channels, func(i, j int) bool { return channels[i].TransferID < channels[j].TransferID }) @@ -1413,6 +1887,9 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann if !completed && channel.Status == datatransfer.Completed { continue } + if !showFailed && (channel.Status == datatransfer.Failed || channel.Status == datatransfer.Cancelled) { + continue + } if channel.IsSender { sendingChannels = append(sendingChannels, channel) } else { diff --git a/cli/client_test.go b/cli/client_test.go new file mode 100644 index 000000000..f0e8efda8 --- /dev/null +++ b/cli/client_test.go @@ -0,0 +1,22 @@ +package cli + +import ( + "context" + "os" + "testing" + "time" + + clitest "github.com/filecoin-project/lotus/cli/test" +) + +// TestClient does a basic test to exercise the client CLI +// commands +func TestClient(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() + + blocktime := 5 * time.Millisecond + ctx := context.Background() + clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) + clitest.RunClientTest(t, Commands, clientNode) +} diff --git a/cli/cmd.go b/cli/cmd.go index eef73b241..02ef06002 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "os" "os/signal" "strings" @@ -206,7 +207,22 @@ func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error return client.NewFullNodeRPC(ctx.Context, addr, headers) } -func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) { +type GetStorageMinerOptions struct { + PreferHttp bool +} + +type GetStorageMinerOption func(*GetStorageMinerOptions) + +func StorageMinerUseHttp(opts *GetStorageMinerOptions) { + opts.PreferHttp = true +} + +func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) { + var options GetStorageMinerOptions + for _, opt := range opts { + opt(&options) + } + if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { return tn.(api.StorageMiner), func() {}, nil } @@ -216,7 +232,23 @@ func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMi return nil, nil, err } - return client.NewStorageMinerRPC(ctx.Context, addr, headers, opts...) 
+ if options.PreferHttp { + u, err := url.Parse(addr) + if err != nil { + return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + } + + return client.NewStorageMinerRPC(ctx.Context, addr, headers) } func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) { diff --git a/cli/helper.go b/cli/helper.go index 9398ead71..da236bcae 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -2,15 +2,16 @@ package cli import ( "fmt" + "io" "os" - "github.com/urfave/cli/v2" + ufcli "github.com/urfave/cli/v2" "golang.org/x/xerrors" ) type PrintHelpErr struct { Err error - Ctx *cli.Context + Ctx *ufcli.Context } func (e *PrintHelpErr) Error() string { @@ -26,11 +27,11 @@ func (e *PrintHelpErr) Is(o error) bool { return ok } -func ShowHelp(cctx *cli.Context, err error) error { +func ShowHelp(cctx *ufcli.Context, err error) error { return &PrintHelpErr{Err: err, Ctx: cctx} } -func RunApp(app *cli.App) { +func RunApp(app *ufcli.App) { if err := app.Run(os.Args); err != nil { if os.Getenv("LOTUS_DEV") != "" { log.Warnf("%+v", err) @@ -39,8 +40,40 @@ func RunApp(app *cli.App) { } var phe *PrintHelpErr if xerrors.As(err, &phe) { - _ = cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name) + _ = ufcli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name) } os.Exit(1) } } + +type AppFmt struct { + app *ufcli.App + Stdin io.Reader +} + +func NewAppFmt(a *ufcli.App) *AppFmt { + var stdin io.Reader + istdin, ok := a.Metadata["stdin"] + if ok { + stdin = istdin.(io.Reader) + } else { + stdin = os.Stdin + } + return &AppFmt{app: a, Stdin: stdin} +} + +func (a *AppFmt) Print(args ...interface{}) { + fmt.Fprint(a.app.Writer, args...) +} + +func (a *AppFmt) Println(args ...interface{}) { + fmt.Fprintln(a.app.Writer, args...) +} + +func (a *AppFmt) Printf(fmtstr string, args ...interface{}) { + fmt.Fprintf(a.app.Writer, fmtstr, args...) +} + +func (a *AppFmt) Scan(args ...interface{}) (int, error) { + return fmt.Fscan(a.Stdin, args...) 
+} diff --git a/cli/mpool.go b/cli/mpool.go index 8f3e937b6..4979f6ddc 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" ) var mpoolCmd = &cli.Command{ @@ -434,7 +435,12 @@ var mpoolReplaceCmd = &cli.Command{ msg.GasPremium = big.Max(retm.GasPremium, minRBF) msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium) - messagepool.CapGasFee(&msg, mss.Get().MaxFee) + + mff := func() (abi.TokenAmount, error) { + return abi.TokenAmount(config.DefaultDefaultMaxFee), nil + } + + messagepool.CapGasFee(mff, &msg, mss.Get().MaxFee) } else { msg.GasLimit = cctx.Int64("gas-limit") msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium")) diff --git a/cli/multisig.go b/cli/multisig.go index b692fad5a..8abae5182 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -3,29 +3,31 @@ package cli import ( "bytes" "encoding/hex" + "encoding/json" "fmt" - "os" + "reflect" "sort" "strconv" "text/tabwriter" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/stmgr" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-address" + cid "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" "golang.org/x/xerrors" - init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" - msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/build" @@ -37,10 +39,18 @@ import ( var multisigCmd = &cli.Command{ Name: "msig", Usage: "Interact with a multisig wallet", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "confidence", + Usage: "number of block confirmations to wait for", + Value: int(build.MessageConfidence), + }, + }, Subcommands: []*cli.Command{ msigCreateCmd, msigInspectCmd, msigProposeCmd, + msigRemoveProposeCmd, msigApproveCmd, msigAddProposeCmd, msigAddApproveCmd, @@ -52,6 +62,7 @@ var multisigCmd = &cli.Command{ msigLockApproveCmd, msigLockCancelCmd, msigVestedCmd, + msigProposeThresholdCmd, }, } @@ -141,24 +152,24 @@ var msigCreateCmd = &cli.Command{ } // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } // check it executed successfully if wait.Receipt.ExitCode != 0 { - fmt.Println("actor creation failed!") + fmt.Fprintln(cctx.App.Writer, "actor creation failed!") return err } // get address of newly created miner - var execreturn init0.ExecReturn + var execreturn init2.ExecReturn if err := execreturn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { return err } - fmt.Println("Created new multisig: ", execreturn.IDAddress, execreturn.RobustAddress) + fmt.Fprintln(cctx.App.Writer, "Created new multisig: ", execreturn.IDAddress, execreturn.RobustAddress) // TODO: maybe register this somewhere return nil @@ -174,6 +185,10 @@ var msigInspectCmd = 
&cli.Command{ Name: "vesting", Usage: "Include vesting details", }, + &cli.BoolFlag{ + Name: "decode-params", + Usage: "Decode parameters of transaction proposals", + }, }, Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { @@ -204,6 +219,11 @@ var msigInspectCmd = &cli.Command{ return err } + ownId, err := api.StateLookupID(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + mstate, err := multisig.Load(store, act) if err != nil { return err @@ -213,25 +233,25 @@ var msigInspectCmd = &cli.Command{ return err } - fmt.Printf("Balance: %s\n", types.FIL(act.Balance)) - fmt.Printf("Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked))) + fmt.Fprintf(cctx.App.Writer, "Balance: %s\n", types.FIL(act.Balance)) + fmt.Fprintf(cctx.App.Writer, "Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked))) if cctx.Bool("vesting") { ib, err := mstate.InitialBalance() if err != nil { return err } - fmt.Printf("InitialBalance: %s\n", types.FIL(ib)) + fmt.Fprintf(cctx.App.Writer, "InitialBalance: %s\n", types.FIL(ib)) se, err := mstate.StartEpoch() if err != nil { return err } - fmt.Printf("StartEpoch: %d\n", se) + fmt.Fprintf(cctx.App.Writer, "StartEpoch: %d\n", se) ud, err := mstate.UnlockDuration() if err != nil { return err } - fmt.Printf("UnlockDuration: %d\n", ud) + fmt.Fprintf(cctx.App.Writer, "UnlockDuration: %d\n", ud) } signers, err := mstate.Signers() @@ -242,10 +262,21 @@ var msigInspectCmd = &cli.Command{ if err != nil { return err } - fmt.Printf("Threshold: %d / %d\n", threshold, len(signers)) - fmt.Println("Signers:") + fmt.Fprintf(cctx.App.Writer, "Threshold: %d / %d\n", threshold, len(signers)) + fmt.Fprintln(cctx.App.Writer, "Signers:") + + signerTable := tabwriter.NewWriter(cctx.App.Writer, 8, 4, 2, ' ', 0) + fmt.Fprintf(signerTable, "ID\tAddress\n") for _, s := range signers { - fmt.Printf("\t%s\n", s) + signerActor, err := api.StateAccountKey(ctx, s, types.EmptyTSK) + if err != nil { + fmt.Fprintf(signerTable, "%s\t%s\n", s, "N/A") + } else { + fmt.Fprintf(signerTable, "%s\t%s\n", s, signerActor) + } + } + if err := signerTable.Flush(); err != nil { + return xerrors.Errorf("flushing output: %+v", err) } pending := make(map[int64]multisig.Transaction) @@ -256,7 +287,8 @@ var msigInspectCmd = &cli.Command{ return xerrors.Errorf("reading pending transactions: %w", err) } - fmt.Println("Transactions: ", len(pending)) + decParams := cctx.Bool("decode-params") + fmt.Fprintln(cctx.App.Writer, "Transactions: ", len(pending)) if len(pending) > 0 { var txids []int64 for txid := range pending { @@ -266,11 +298,42 @@ var msigInspectCmd = &cli.Command{ return txids[i] < txids[j] }) - w := tabwriter.NewWriter(os.Stdout, 8, 4, 0, ' ', 0) + w := tabwriter.NewWriter(cctx.App.Writer, 8, 4, 2, ' ', 0) fmt.Fprintf(w, "ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n") for _, txid := range txids { tx := pending[txid] - fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%d\t%x\n", txid, "pending", len(tx.Approved), tx.To, types.FIL(tx.Value), tx.Method, tx.Params) + target := tx.To.String() + if tx.To == ownId { + target += " (self)" + } + targAct, err := api.StateGetActor(ctx, tx.To, types.EmptyTSK) + paramStr := fmt.Sprintf("%x", tx.Params) + + if err != nil { + if tx.Method == 0 { + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "Send", tx.Method, paramStr) + } else { + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown 
method", tx.Method, paramStr) + } + } else { + method := stmgr.MethodsMap[targAct.Code][tx.Method] + + if decParams && tx.Method != 0 { + ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler) + if err := ptyp.UnmarshalCBOR(bytes.NewReader(tx.Params)); err != nil { + return xerrors.Errorf("failed to decode parameters of transaction %d: %w", txid, err) + } + + b, err := json.Marshal(ptyp) + if err != nil { + return xerrors.Errorf("could not json marshal parameter type: %w", err) + } + + paramStr = string(b) + } + + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), method.Name, tx.Method, paramStr) + } } if err := w.Flush(); err != nil { return xerrors.Errorf("flushing output: %+v", err) @@ -370,7 +433,7 @@ var msigProposeCmd = &cli.Command{ fmt.Println("send proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -379,7 +442,7 @@ var msigProposeCmd = &cli.Command{ return fmt.Errorf("proposal returned exit %d", wait.Receipt.ExitCode) } - var retval msig0.ProposeReturn + var retval msig2.ProposeReturn if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { return fmt.Errorf("failed to unmarshal propose return value: %w", err) } @@ -398,7 +461,7 @@ var msigProposeCmd = &cli.Command{ var msigApproveCmd = &cli.Command{ Name: "approve", Usage: "Approve a multisig message", - ArgsUsage: "[multisigAddress messageId proposerAddress destination value (optional)]", + ArgsUsage: " [proposerAddress destination value [methodId methodParams]]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", @@ -406,14 +469,18 @@ var msigApproveCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.Args().Len() < 5 { - return ShowHelp(cctx, fmt.Errorf("must pass multisig address, message ID, proposer address, destination, and value")) + if cctx.Args().Len() < 2 { + return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) } if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 { return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) } + if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 { + return ShowHelp(cctx, fmt.Errorf("usage: msig approve ")) + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -431,42 +498,121 @@ var msigApproveCmd = &cli.Command{ return err } - proposer, err := address.NewFromString(cctx.Args().Get(2)) - if err != nil { - return err + var from address.Address + if cctx.IsSet("from") { + f, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + from = f + } else { + defaddr, err := api.WalletDefaultAddress(ctx) + if err != nil { + return err + } + from = defaddr } - if proposer.Protocol() != address.ID { - proposer, err = api.StateLookupID(ctx, proposer, types.EmptyTSK) + var msgCid cid.Cid + if cctx.Args().Len() == 2 { + msgCid, err = api.MsigApprove(ctx, msig, txid, from) + if err != nil { + return err + } + } else { + proposer, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + if proposer.Protocol() != address.ID { + proposer, err = api.StateLookupID(ctx, proposer, types.EmptyTSK) + if err != nil { + return err + } + } + + dest, err := address.NewFromString(cctx.Args().Get(3)) + if err != nil { + return err + } + + value, err := types.ParseFIL(cctx.Args().Get(4)) + if err != nil { + 
return err + } + + var method uint64 + var params []byte + if cctx.Args().Len() == 7 { + m, err := strconv.ParseUint(cctx.Args().Get(5), 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(cctx.Args().Get(6)) + if err != nil { + return err + } + params = p + } + + msgCid, err = api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) if err != nil { return err } } - dest, err := address.NewFromString(cctx.Args().Get(3)) + fmt.Println("sent approval in message: ", msgCid) + + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } - value, err := types.ParseFIL(cctx.Args().Get(4)) + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("approve returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} + +var msigRemoveProposeCmd = &cli.Command{ + Name: "propose-remove", + Usage: "Propose to remove a signer", + ArgsUsage: "[multisigAddress signer]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "decrease-threshold", + Usage: "whether the number of required signers should be decreased", + }, + &cli.StringFlag{ + Name: "from", + Usage: "account to send the propose message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address")) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + msig, err := address.NewFromString(cctx.Args().Get(0)) if err != nil { return err } - var method uint64 - var params []byte - if cctx.Args().Len() == 7 { - m, err := strconv.ParseUint(cctx.Args().Get(5), 10, 64) - if err != nil { - return err - } - method = m - - p, err := hex.DecodeString(cctx.Args().Get(6)) - if err != nil { - return err - } - params = p + addr, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err } var from address.Address @@ -484,22 +630,29 @@ var msigApproveCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) + msgCid, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold")) if err != nil { return err } - fmt.Println("sent approval in message: ", msgCid) + fmt.Println("sent remove proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } if wait.Receipt.ExitCode != 0 { - return fmt.Errorf("approve returned exit %d", wait.Receipt.ExitCode) + return fmt.Errorf("remove proposal returned exit %d", wait.Receipt.ExitCode) } + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return xerrors.Errorf("decoding proposal return: %w", err) + } + fmt.Printf("TxnID: %d", ret.TxnID) + return nil }, } @@ -560,9 +713,9 @@ var msigAddProposeCmd = &cli.Command{ return err } - fmt.Println("sent add proposal in message: ", msgCid) + fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -644,7 +797,7 @@ var msigAddApproveCmd = &cli.Command{ fmt.Println("sent add approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid,
build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -721,7 +874,7 @@ var msigAddCancelCmd = &cli.Command{ fmt.Println("sent add cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -793,7 +946,7 @@ var msigSwapProposeCmd = &cli.Command{ fmt.Println("sent swap proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -875,7 +1028,7 @@ var msigSwapApproveCmd = &cli.Command{ fmt.Println("sent swap approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -952,7 +1105,7 @@ var msigSwapCancelCmd = &cli.Command{ fmt.Println("sent swap cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -1022,7 +1175,7 @@ var msigLockProposeCmd = &cli.Command{ from = defaddr } - params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{ + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), Amount: abi.NewTokenAmount(amount.Int64()), @@ -1032,14 +1185,14 @@ var msigLockProposeCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params) + msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } fmt.Println("sent lock proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -1119,7 +1272,7 @@ var msigLockApproveCmd = &cli.Command{ from = defaddr } - params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{ + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), Amount: abi.NewTokenAmount(amount.Int64()), @@ -1129,14 +1282,14 @@ var msigLockApproveCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params) + msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } fmt.Println("sent lock approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -1211,7 +1364,7 @@ var msigLockCancelCmd = &cli.Command{ from = defaddr } - params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{ + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), Amount: abi.NewTokenAmount(amount.Int64()), @@ -1221,14 +1374,14 @@ var msigLockCancelCmd = 
&cli.Command{ return actErr } - msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params) + msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } fmt.Println("sent lock cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) if err != nil { return err } @@ -1302,3 +1455,78 @@ var msigVestedCmd = &cli.Command{ return nil }, } + +var msigProposeThresholdCmd = &cli.Command{ + Name: "propose-threshold", + Usage: "Propose setting a different signing threshold on the account", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "account to send the proposal from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return ShowHelp(cctx, fmt.Errorf("must pass multisig address and new threshold value")) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + msig, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + newM, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + var from address.Address + if cctx.IsSet("from") { + f, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + from = f + } else { + defaddr, err := api.WalletDefaultAddress(ctx) + if err != nil { + return err + } + from = defaddr + } + + params, actErr := actors.SerializeParams(&msig2.ChangeNumApprovalsThresholdParams{ + NewThreshold: newM, + }) + + if actErr != nil { + return actErr + } + + msgCid, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) + if err != nil { + return fmt.Errorf("failed to propose change of threshold: %w", err) + } + + fmt.Println("sent change threshold proposal in message: ", msgCid) + + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("change threshold proposal returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} diff --git a/cli/multisig_test.go b/cli/multisig_test.go new file mode 100644 index 000000000..82472cd62 --- /dev/null +++ b/cli/multisig_test.go @@ -0,0 +1,22 @@ +package cli + +import ( + "context" + "os" + "testing" + "time" + + clitest "github.com/filecoin-project/lotus/cli/test" +) + +// TestMultisig does a basic test to exercise the multisig CLI +// commands +func TestMultisig(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() + + blocktime := 5 * time.Millisecond + ctx := context.Background() + clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) + clitest.RunMultisigTest(t, Commands, clientNode) +} diff --git a/cli/paych_test.go b/cli/paych_test.go index 862ca2e74..dac8411c5 100644 --- a/cli/paych_test.go +++ b/cli/paych_test.go @@ -1,9 +1,7 @@ package cli import ( - "bytes" "context" - "flag" "fmt" "os" "regexp" @@ -12,25 +10,21 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" + clitest "github.com/filecoin-project/lotus/cli/test" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - cbor "github.com/ipfs/go-ipld-cbor" - 
"github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/policy" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" - builder "github.com/filecoin-project/lotus/node/test" ) func init() { @@ -43,24 +37,24 @@ func init() { // commands func TestPaymentChannels(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime) + nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) paymentCreator := nodes[0] paymentReceiver := nodes[1] creatorAddr := addrs[0] receiverAddr := addrs[1] // Create mock CLI - mockCLI := newMockCLI(t) - creatorCLI := mockCLI.client(paymentCreator.ListenAddr) - receiverCLI := mockCLI.client(paymentReceiver.ListenAddr) + mockCLI := clitest.NewMockCLI(ctx, t, Commands) + creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) + receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) // creator: paych add-funds channelAmt := "100000" - cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt} - chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd) + chstr := creatorCLI.RunCmd("paych", "add-funds", creatorAddr.String(), receiverAddr.String(), channelAmt) chAddr, err := address.NewFromString(chstr) require.NoError(t, err) @@ -68,16 +62,13 @@ func TestPaymentChannels(t *testing.T) { // creator: paych voucher create voucherAmt := 100 vamt := strconv.Itoa(voucherAmt) - cmd = []string{chAddr.String(), vamt} - voucher := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher := creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), vamt) // receiver: paych voucher add - cmd = []string{chAddr.String(), voucher} - receiverCLI.runCmd(paychVoucherAddCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher) // creator: paych settle - cmd = []string{chAddr.String()} - creatorCLI.runCmd(paychSettleCmd, cmd) + creatorCLI.RunCmd("paych", "settle", chAddr.String()) // Wait for the chain to reach the settle height chState := getPaychState(ctx, t, paymentReceiver, chAddr) @@ -86,8 +77,7 @@ func TestPaymentChannels(t *testing.T) { waitForHeight(ctx, t, paymentReceiver, sa) // receiver: paych collect - cmd = []string{chAddr.String()} - receiverCLI.runCmd(paychCloseCmd, cmd) + receiverCLI.RunCmd("paych", "collect", chAddr.String()) } type voucherSpec struct { @@ -99,20 +89,21 @@ type voucherSpec struct { // TestPaymentChannelStatus tests the payment channel status CLI command func TestPaymentChannelStatus(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime) + nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) paymentCreator := nodes[0] creatorAddr := addrs[0] receiverAddr := addrs[1] // Create mock CLI - mockCLI := newMockCLI(t) - creatorCLI := mockCLI.client(paymentCreator.ListenAddr) + mockCLI := clitest.NewMockCLI(ctx, t, Commands) + creatorCLI := 
mockCLI.Client(paymentCreator.ListenAddr) - cmd := []string{creatorAddr.String(), receiverAddr.String()} - out := creatorCLI.runCmd(paychStatusByFromToCmd, cmd) + // creator: paych status-by-from-to + out := creatorCLI.RunCmd("paych", "status-by-from-to", creatorAddr.String(), receiverAddr.String()) fmt.Println(out) noChannelState := "Channel does not exist" require.Regexp(t, regexp.MustCompile(noChannelState), out) @@ -121,14 +112,17 @@ func TestPaymentChannelStatus(t *testing.T) { create := make(chan string) go func() { // creator: paych add-funds - cmd := []string{creatorAddr.String(), receiverAddr.String(), fmt.Sprintf("%d", channelAmt)} - create <- creatorCLI.runCmd(paychAddFundsCmd, cmd) + create <- creatorCLI.RunCmd( + "paych", + "add-funds", + creatorAddr.String(), + receiverAddr.String(), + fmt.Sprintf("%d", channelAmt)) }() // Wait for the output to stop being "Channel does not exist" for regexp.MustCompile(noChannelState).MatchString(out) { - cmd := []string{creatorAddr.String(), receiverAddr.String()} - out = creatorCLI.runCmd(paychStatusByFromToCmd, cmd) + out = creatorCLI.RunCmd("paych", "status-by-from-to", creatorAddr.String(), receiverAddr.String()) } fmt.Println(out) @@ -148,8 +142,7 @@ func TestPaymentChannelStatus(t *testing.T) { // Wait for create channel to complete chstr := <-create - cmd = []string{chstr} - out = creatorCLI.runCmd(paychStatusCmd, cmd) + out = creatorCLI.RunCmd("paych", "status", chstr) fmt.Println(out) // Output should have the channel address require.Regexp(t, regexp.MustCompile("Channel.*"+chstr), out) @@ -161,11 +154,9 @@ func TestPaymentChannelStatus(t *testing.T) { // creator: paych voucher create voucherAmt := uint64(10) - cmd = []string{chAddr.String(), fmt.Sprintf("%d", voucherAmt)} - creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), fmt.Sprintf("%d", voucherAmt)) - cmd = []string{chstr} - out = creatorCLI.runCmd(paychStatusCmd, cmd) + out = creatorCLI.RunCmd("paych", "status", chstr) fmt.Println(out) voucherAmtAtto := types.BigMul(types.NewInt(voucherAmt), types.NewInt(build.FilecoinPrecision)) voucherAmtStr := fmt.Sprintf("%d", voucherAmtAtto) @@ -177,24 +168,24 @@ func TestPaymentChannelStatus(t *testing.T) { // channel voucher commands func TestPaymentChannelVouchers(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime) + nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) paymentCreator := nodes[0] paymentReceiver := nodes[1] creatorAddr := addrs[0] receiverAddr := addrs[1] // Create mock CLI - mockCLI := newMockCLI(t) - creatorCLI := mockCLI.client(paymentCreator.ListenAddr) - receiverCLI := mockCLI.client(paymentReceiver.ListenAddr) + mockCLI := clitest.NewMockCLI(ctx, t, Commands) + creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) + receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) // creator: paych add-funds channelAmt := "100000" - cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt} - chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd) + chstr := creatorCLI.RunCmd("paych", "add-funds", creatorAddr.String(), receiverAddr.String(), channelAmt) chAddr, err := address.NewFromString(chstr) require.NoError(t, err) @@ -204,39 +195,33 @@ func TestPaymentChannelVouchers(t *testing.T) { // creator: paych voucher create // Note: implied --lane=0 voucherAmt1 := 100 - cmd = 
[]string{chAddr.String(), strconv.Itoa(voucherAmt1)} - voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher1 := creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), strconv.Itoa(voucherAmt1)) vouchers = append(vouchers, voucherSpec{serialized: voucher1, lane: 0, amt: voucherAmt1}) // creator: paych voucher create --lane=5 lane5 := "--lane=5" voucherAmt2 := 50 - cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt2)} - voucher2 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher2 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt2)) vouchers = append(vouchers, voucherSpec{serialized: voucher2, lane: 5, amt: voucherAmt2}) // creator: paych voucher create --lane=5 voucherAmt3 := 70 - cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt3)} - voucher3 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher3 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt3)) vouchers = append(vouchers, voucherSpec{serialized: voucher3, lane: 5, amt: voucherAmt3}) // creator: paych voucher create --lane=5 voucherAmt4 := 80 - cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt4)} - voucher4 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher4 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt4)) vouchers = append(vouchers, voucherSpec{serialized: voucher4, lane: 5, amt: voucherAmt4}) // creator: paych voucher list --export - cmd = []string{"--export", chAddr.String()} - list := creatorCLI.runCmd(paychVoucherListCmd, cmd) + list := creatorCLI.RunCmd("paych", "voucher", "list", "--export", chAddr.String()) // Check that voucher list output is correct on creator checkVoucherOutput(t, list, vouchers) // creator: paych voucher best-spendable - cmd = []string{"--export", chAddr.String()} - bestSpendable := creatorCLI.runCmd(paychVoucherBestSpendableCmd, cmd) + bestSpendable := creatorCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String()) // Check that best spendable output is correct on creator bestVouchers := []voucherSpec{ @@ -246,31 +231,25 @@ func TestPaymentChannelVouchers(t *testing.T) { checkVoucherOutput(t, bestSpendable, bestVouchers) // receiver: paych voucher add - cmd = []string{chAddr.String(), voucher1} - receiverCLI.runCmd(paychVoucherAddCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher1) // receiver: paych voucher add - cmd = []string{chAddr.String(), voucher2} - receiverCLI.runCmd(paychVoucherAddCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher2) // receiver: paych voucher add - cmd = []string{chAddr.String(), voucher3} - receiverCLI.runCmd(paychVoucherAddCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher3) // receiver: paych voucher add - cmd = []string{chAddr.String(), voucher4} - receiverCLI.runCmd(paychVoucherAddCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher4) // receiver: paych voucher list --export - cmd = []string{"--export", chAddr.String()} - list = receiverCLI.runCmd(paychVoucherListCmd, cmd) + list = receiverCLI.RunCmd("paych", "voucher", "list", "--export", chAddr.String()) // Check that voucher list output is correct on receiver checkVoucherOutput(t, list, vouchers) // receiver: paych voucher best-spendable - cmd = []string{"--export", chAddr.String()} - bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, 
cmd) + bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String()) // Check that best spendable output is correct on receiver bestVouchers = []voucherSpec{ @@ -280,12 +259,10 @@ func TestPaymentChannelVouchers(t *testing.T) { checkVoucherOutput(t, bestSpendable, bestVouchers) // receiver: paych voucher submit - cmd = []string{chAddr.String(), voucher1} - receiverCLI.runCmd(paychVoucherSubmitCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher1) // receiver: paych voucher best-spendable - cmd = []string{"--export", chAddr.String()} - bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd) + bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String()) // Check that best spendable output no longer includes submitted voucher bestVouchers = []voucherSpec{ @@ -296,12 +273,10 @@ func TestPaymentChannelVouchers(t *testing.T) { // There are three vouchers in lane 5: 50, 70, 80 // Submit the voucher for 50. Best spendable should still be 80. // receiver: paych voucher submit - cmd = []string{chAddr.String(), voucher2} - receiverCLI.runCmd(paychVoucherSubmitCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher2) // receiver: paych voucher best-spendable - cmd = []string{"--export", chAddr.String()} - bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd) + bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String()) // Check that best spendable output still includes the voucher for 80 bestVouchers = []voucherSpec{ @@ -311,12 +286,10 @@ func TestPaymentChannelVouchers(t *testing.T) { // Submit the voucher for 80 // receiver: paych voucher submit - cmd = []string{chAddr.String(), voucher4} - receiverCLI.runCmd(paychVoucherSubmitCmd, cmd) + receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher4) // receiver: paych voucher best-spendable - cmd = []string{"--export", chAddr.String()} - bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd) + bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String()) // Check that best spendable output no longer includes submitted voucher bestVouchers = []voucherSpec{} @@ -327,22 +300,27 @@ func TestPaymentChannelVouchers(t *testing.T) { // is greater than what's left in the channel, voucher create fails func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime) + nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) paymentCreator := nodes[0] creatorAddr := addrs[0] receiverAddr := addrs[1] // Create mock CLI - mockCLI := newMockCLI(t) - creatorCLI := mockCLI.client(paymentCreator.ListenAddr) + mockCLI := clitest.NewMockCLI(ctx, t, Commands) + creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych add-funds channelAmt := 100 - cmd := []string{creatorAddr.String(), receiverAddr.String(), fmt.Sprintf("%d", channelAmt)} - chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd) + chstr := creatorCLI.RunCmd( + "paych", + "add-funds", + creatorAddr.String(), + receiverAddr.String(), + fmt.Sprintf("%d", channelAmt)) chAddr, err := address.NewFromString(chstr) require.NoError(t, err) @@ -350,15 +328,25 @@ func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { // creator: 
paych voucher create --lane=1 voucherAmt1 := 60 lane1 := "--lane=1" - cmd = []string{lane1, chAddr.String(), strconv.Itoa(voucherAmt1)} - voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd) + voucher1 := creatorCLI.RunCmd( + "paych", + "voucher", + "create", + lane1, + chAddr.String(), + strconv.Itoa(voucherAmt1)) fmt.Println(voucher1) // creator: paych voucher create --lane=2 lane2 := "--lane=2" voucherAmt2 := 70 - cmd = []string{lane2, chAddr.String(), strconv.Itoa(voucherAmt2)} - _, err = creatorCLI.runCmdRaw(paychVoucherCreateCmd, cmd) + _, err = creatorCLI.RunCmdRaw( + "paych", + "voucher", + "create", + lane2, + chAddr.String(), + strconv.Itoa(voucherAmt2)) // Should fail because channel doesn't have required amount require.Error(t, err) @@ -389,128 +377,6 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) { } } -func startTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) { - n, sn := builder.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner) - - paymentCreator := n[0] - paymentReceiver := n[1] - miner := sn[0] - - // Get everyone connected - addrs, err := paymentCreator.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := paymentReceiver.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - - // Send some funds to register the receiver - receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1")) - if err != nil { - t.Fatal(err) - } - - test.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) - - // Get the creator's address - creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // Create mock CLI - return n, []address.Address{creatorAddr, receiverAddr} -} - -type mockCLI struct { - t *testing.T - cctx *cli.Context - out *bytes.Buffer -} - -func newMockCLI(t *testing.T) *mockCLI { - // Create a CLI App with an --api-url flag so that we can specify which node - // the command should be executed against - app := cli.NewApp() - app.Flags = []cli.Flag{ - &cli.StringFlag{ - Name: "api-url", - Hidden: true, - }, - } - var out bytes.Buffer - app.Writer = &out - app.Setup() - - cctx := cli.NewContext(app, &flag.FlagSet{}, nil) - return &mockCLI{t: t, cctx: cctx, out: &out} -} - -func (c *mockCLI) client(addr multiaddr.Multiaddr) *mockCLIClient { - return &mockCLIClient{t: c.t, addr: addr, cctx: c.cctx, out: c.out} -} - -// mockCLIClient runs commands against a particular node -type mockCLIClient struct { - t *testing.T - addr multiaddr.Multiaddr - cctx *cli.Context - out *bytes.Buffer -} - -func (c *mockCLIClient) runCmd(cmd *cli.Command, input []string) string { - out, err := c.runCmdRaw(cmd, input) - require.NoError(c.t, err) - - return out -} - -func (c *mockCLIClient) runCmdRaw(cmd *cli.Command, input []string) (string, error) { - // prepend --api-url= - apiFlag := "--api-url=" + c.addr.String() - input = append([]string{apiFlag}, input...) 
- - fs := c.flagSet(cmd) - err := fs.Parse(input) - require.NoError(c.t, err) - - err = cmd.Action(cli.NewContext(c.cctx.App, fs, c.cctx)) - - // Get the output - str := strings.TrimSpace(c.out.String()) - c.out.Reset() - return str, err -} - -func (c *mockCLIClient) flagSet(cmd *cli.Command) *flag.FlagSet { - // Apply app level flags (so we can process --api-url flag) - fs := &flag.FlagSet{} - for _, f := range c.cctx.App.Flags { - err := f.Apply(fs) - if err != nil { - c.t.Fatal(err) - } - } - // Apply command level flags - for _, f := range cmd.Flags { - err := f.Apply(fs) - if err != nil { - c.t.Fatal(err) - } - } - return fs -} - // waitForHeight waits for the node to reach the given chain epoch func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) { atHeight := make(chan struct{}) diff --git a/cli/state.go b/cli/state.go index 453cde77f..427746155 100644 --- a/cli/state.go +++ b/cli/state.go @@ -15,13 +15,13 @@ import ( "strings" "time" + "github.com/fatih/color" "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/multiformats/go-multiaddr" - "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" @@ -60,7 +60,7 @@ var stateCmd = &cli.Command{ stateSectorCmd, stateGetActorCmd, stateLookupIDCmd, - stateReplaySetCmd, + stateReplayCmd, stateSectorSizeCmd, stateReadStateCmd, stateListMessagesCmd, @@ -69,7 +69,6 @@ var stateCmd = &cli.Command{ stateGetDealSetCmd, stateWaitMsgCmd, stateSearchMsgCmd, - stateMsgCostCmd, stateMinerInfo, stateMarketCmd, stateExecTraceCmd, @@ -108,13 +107,18 @@ var stateMinerInfo = &cli.Command{ return err } + availableBalance, err := api.StateMinerAvailableBalance(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner available balance: %w", err) + } + fmt.Printf("Available Balance: %s\n", types.FIL(availableBalance)) fmt.Printf("Owner:\t%s\n", mi.Owner) fmt.Printf("Worker:\t%s\n", mi.Worker) for i, controlAddress := range mi.ControlAddresses { fmt.Printf("Control %d: \t%s\n", i, controlAddress) } + fmt.Printf("PeerID:\t%s\n", mi.PeerId) - fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) fmt.Printf("Multiaddrs: \t") for _, addr := range mi.Multiaddrs { a, err := multiaddr.NewMultiaddrBytes(addr) @@ -123,6 +127,26 @@ var stateMinerInfo = &cli.Command{ } fmt.Printf("%s ", a) } + + fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) + pow, err := api.StateMinerPower(ctx, addr, ts.Key()) + if err != nil { + return err + } + + rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) + qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) + + fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", + color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), + types.SizeStr(pow.TotalPower.RawBytePower), + float64(rpercI.Int64())/10000) + + fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", + color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), + types.DeciStr(pow.TotalPower.QualityAdjPower), + float64(qpercI.Int64())/10000) + fmt.Println() cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) @@ -384,20 +408,27 @@ var stateExecTraceCmd = &cli.Command{ 
}, } -var stateReplaySetCmd = &cli.Command{ +var stateReplayCmd = &cli.Command{ Name: "replay", - Usage: "Replay a particular message within a tipset", - ArgsUsage: "[tipsetKey messageCid]", + Usage: "Replay a particular message", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "show-trace", + Usage: "print out full execution trace for given message", + }, + &cli.BoolFlag{ + Name: "detailed-gas", + Usage: "print out detailed gas costs for given message", + }, + }, Action: func(cctx *cli.Context) error { - if cctx.Args().Len() < 1 { - fmt.Println("usage: [tipset] ") - fmt.Println("The last cid passed will be used as the message CID") - fmt.Println("All preceding ones will be used as the tipset") + if cctx.Args().Len() != 1 { + fmt.Println("must provide cid of message to replay") return nil } - args := cctx.Args().Slice() - mcid, err := cid.Decode(args[len(args)-1]) + mcid, err := cid.Decode(cctx.Args().First()) if err != nil { return fmt.Errorf("message cid was invalid: %s", err) } @@ -410,52 +441,7 @@ var stateReplaySetCmd = &cli.Command{ ctx := ReqContext(cctx) - var ts *types.TipSet - { - var tscids []cid.Cid - for _, s := range args[:len(args)-1] { - c, err := cid.Decode(s) - if err != nil { - return fmt.Errorf("tipset cid was invalid: %s", err) - } - tscids = append(tscids, c) - } - - if len(tscids) > 0 { - var headers []*types.BlockHeader - for _, c := range tscids { - h, err := fapi.ChainGetBlock(ctx, c) - if err != nil { - return err - } - - headers = append(headers, h) - } - - ts, err = types.NewTipSet(headers) - if err != nil { - return err - } - } else { - var r *api.MsgLookup - r, err = fapi.StateWaitMsg(ctx, mcid, build.MessageConfidence) - if err != nil { - return xerrors.Errorf("finding message in chain: %w", err) - } - - childTs, err := fapi.ChainGetTipSet(ctx, r.TipSet) - if err != nil { - return xerrors.Errorf("loading tipset: %w", err) - } - ts, err = fapi.ChainGetTipSet(ctx, childTs.Parents()) - if err != nil { - return err - } - } - - } - - res, err := fapi.StateReplay(ctx, ts.Key(), mcid) + res, err := fapi.StateReplay(ctx, types.EmptyTSK, mcid) if err != nil { return xerrors.Errorf("replay call failed: %w", err) } @@ -464,10 +450,25 @@ var stateReplaySetCmd = &cli.Command{ fmt.Printf("Exit code: %d\n", res.MsgRct.ExitCode) fmt.Printf("Return: %x\n", res.MsgRct.Return) fmt.Printf("Gas Used: %d\n", res.MsgRct.GasUsed) + + if cctx.Bool("detailed-gas") { + fmt.Printf("Base Fee Burn: %d\n", res.GasCost.BaseFeeBurn) + fmt.Printf("Overestimation Burn: %d\n", res.GasCost.OverEstimationBurn) + fmt.Printf("Miner Penalty: %d\n", res.GasCost.MinerPenalty) + fmt.Printf("Miner Tip: %d\n", res.GasCost.MinerTip) + fmt.Printf("Refund: %d\n", res.GasCost.Refund) + } + fmt.Printf("Total Message Cost: %d\n", res.GasCost.TotalCost) + if res.MsgRct.ExitCode != 0 { fmt.Printf("Error message: %q\n", res.Error) } + if cctx.Bool("show-trace") { + fmt.Printf("%s\t%s\t%s\t%d\t%x\t%d\t%x\n", res.Msg.From, res.Msg.To, res.Msg.Value, res.Msg.Method, res.Msg.Params, res.MsgRct.ExitCode, res.MsgRct.Return) + printInternalExecutions("\t", res.ExecutionTrace.Subcalls) + } + return nil }, } @@ -837,33 +838,66 @@ var stateListMessagesCmd = &cli.Command{ froma = a } - toh := cctx.Uint64("toheight") + toh := abi.ChainEpoch(cctx.Uint64("toheight")) ts, err := LoadTipSet(ctx, cctx, api) if err != nil { return err } - msgs, err := api.StateListMessages(ctx, &types.Message{To: toa, From: froma}, ts.Key(), abi.ChainEpoch(toh)) - if err != nil { - return err + if ts == nil { + head, err :=
api.ChainHead(ctx) + if err != nil { + return err + } + ts = head } - for _, c := range msgs { - if cctx.Bool("cids") { - fmt.Println(c.String()) - continue + windowSize := abi.ChainEpoch(100) + + cur := ts + for cur.Height() > toh { + if ctx.Err() != nil { + return ctx.Err() } - m, err := api.ChainGetMessage(ctx, c) + end := toh + if cur.Height()-windowSize > end { + end = cur.Height() - windowSize + } + + msgs, err := api.StateListMessages(ctx, &lapi.MessageMatch{To: toa, From: froma}, cur.Key(), end) if err != nil { return err } - b, err := json.MarshalIndent(m, "", " ") + + for _, c := range msgs { + if cctx.Bool("cids") { + fmt.Println(c.String()) + continue + } + + m, err := api.ChainGetMessage(ctx, c) + if err != nil { + return err + } + b, err := json.MarshalIndent(m, "", " ") + if err != nil { + return err + } + fmt.Println(string(b)) + } + + if end <= 0 { + break + } + + next, err := api.ChainGetTipSetByHeight(ctx, end-1, cur.Key()) if err != nil { return err } - fmt.Println(string(b)) + + cur = next } return nil @@ -1202,7 +1236,7 @@ func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOut "GetCode": getCode, "GetMethod": getMethod, "ToFil": toFil, - "JsonParams": jsonParams, + "JsonParams": JsonParams, "JsonReturn": jsonReturn, "IsSlow": isSlow, "IsVerySlow": isVerySlow, @@ -1289,13 +1323,12 @@ func sumGas(changes []*types.GasTrace) types.GasTrace { return out } -func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) { - methodMeta, found := stmgr.MethodsMap[code][method] - if !found { - return "", fmt.Errorf("method %d not found on actor %s", method, code) +func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) { + p, err := stmgr.GetParamType(code, method) + if err != nil { + return "", err } - re := reflect.New(methodMeta.Params.Elem()) - p := re.Interface().(cbg.CBORUnmarshaler) + if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil { return "", err } @@ -1424,60 +1457,6 @@ var stateSearchMsgCmd = &cli.Command{ }, } -var stateMsgCostCmd = &cli.Command{ - Name: "msg-cost", - Usage: "Get the detailed gas costs of a message", - ArgsUsage: "[messageCid]", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must specify message cid to get gas costs for") - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := ReqContext(cctx) - - msg, err := cid.Decode(cctx.Args().First()) - if err != nil { - return err - } - - tsk := types.EmptyTSK - - ts, err := LoadTipSet(ctx, cctx, api) - if err != nil { - return err - } - - if ts != nil { - tsk = ts.Key() - } - - mgc, err := api.StateMsgGasCost(ctx, msg, tsk) - if err != nil { - return err - } - - if mgc != nil { - fmt.Printf("Message CID: %s", mgc.Message) - fmt.Printf("\nGas Used: %d", mgc.GasUsed) - fmt.Printf("\nBase Fee Burn: %d", mgc.BaseFeeBurn) - fmt.Printf("\nOverestimation Burn: %d", mgc.OverEstimationBurn) - fmt.Printf("\nMiner Tip: %d", mgc.MinerTip) - fmt.Printf("\nRefund: %d", mgc.Refund) - fmt.Printf("\nTotal Cost: %d", mgc.TotalCost) - fmt.Printf("\nMiner Penalty: %d", mgc.MinerPenalty) - } else { - fmt.Print("message was not found on chain") - } - return nil - }, -} - var stateCallCmd = &cli.Command{ Name: "call", Usage: "Invoke a method on an actor locally", @@ -1688,7 +1667,14 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er var stateCircSupplyCmd = &cli.Command{ Name: "circulating-supply", - Usage: "Get 
the current circulating supply of filecoin", + Usage: "Get the exact current circulating supply of Filecoin", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "vm-supply", + Usage: "calculates the approximation of the circulating supply used internally by the VM (instead of the exact amount)", + Value: false, + }, + }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1703,16 +1689,26 @@ var stateCircSupplyCmd = &cli.Command{ return err } - circ, err := api.StateCirculatingSupply(ctx, ts.Key()) - if err != nil { - return err - } + if cctx.IsSet("vm-supply") { + circ, err := api.StateVMCirculatingSupplyInternal(ctx, ts.Key()) + if err != nil { + return err + } - fmt.Println("Circulating supply: ", types.FIL(circ.FilCirculating)) - fmt.Println("Mined: ", types.FIL(circ.FilMined)) - fmt.Println("Vested: ", types.FIL(circ.FilVested)) - fmt.Println("Burnt: ", types.FIL(circ.FilBurnt)) - fmt.Println("Locked: ", types.FIL(circ.FilLocked)) + fmt.Println("Circulating supply: ", types.FIL(circ.FilCirculating)) + fmt.Println("Mined: ", types.FIL(circ.FilMined)) + fmt.Println("Vested: ", types.FIL(circ.FilVested)) + fmt.Println("Burnt: ", types.FIL(circ.FilBurnt)) + fmt.Println("Locked: ", types.FIL(circ.FilLocked)) + } else { + circ, err := api.StateCirculatingSupply(ctx, ts.Key()) + if err != nil { + return err + } + + fmt.Println("Exact circulating supply: ", types.FIL(circ)) + return nil + } return nil }, diff --git a/cli/sync.go b/cli/sync.go index ea066cbda..c3f25eb1d 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -84,6 +84,12 @@ var syncStatusCmd = &cli.Command{ var syncWaitCmd = &cli.Command{ Name: "wait", Usage: "Wait for sync to be complete", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "watch", + Usage: "don't exit after node is synced", + }, + }, Action: func(cctx *cli.Context) error { napi, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -92,7 +98,7 @@ var syncWaitCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - return SyncWait(ctx, napi) + return SyncWait(ctx, napi, cctx.Bool("watch")) }, } @@ -234,7 +240,7 @@ var syncCheckpointCmd = &cli.Command{ }, } -func SyncWait(ctx context.Context, napi api.FullNode) error { +func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { tick := time.Second / 4 lastLines := 0 @@ -311,7 +317,7 @@ func SyncWait(ctx context.Context, napi api.FullNode) error { _ = target // todo: maybe print? 
(creates a bunch of line wrapping issues with most tipsets) - if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) { + if !watch && time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) { fmt.Println("\nDone!") return nil } diff --git a/cli/test/client.go b/cli/test/client.go new file mode 100644 index 000000000..95abd39c2 --- /dev/null +++ b/cli/test/client.go @@ -0,0 +1,119 @@ +package test + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/stretchr/testify/require" + lcli "github.com/urfave/cli/v2" +) + +// RunClientTest exercises some of the client CLI commands +func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + // Create mock CLI + mockCLI := NewMockCLI(ctx, t, cmds) + clientCLI := mockCLI.Client(clientNode.ListenAddr) + + // Get the miner address + addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, addrs, 1) + + minerAddr := addrs[0] + fmt.Println("Miner:", minerAddr) + + // client query-ask + out := clientCLI.RunCmd("client", "query-ask", minerAddr.String()) + require.Regexp(t, regexp.MustCompile("Ask:"), out) + + // Create a deal (non-interactive) + // client deal 1000000attofil + res, _, err := test.CreateClientFile(ctx, clientNode, 1) + require.NoError(t, err) + dataCid := res.Root + price := "1000000attofil" + duration := fmt.Sprintf("%d", build.MinDealDuration) + out = clientCLI.RunCmd("client", "deal", dataCid.String(), minerAddr.String(), price, duration) + fmt.Println("client deal", out) + + // Create a deal (interactive) + // client deal + // + // (in days) + // + // "no" (verified client) + // "yes" (confirm deal) + res, _, err = test.CreateClientFile(ctx, clientNode, 2) + require.NoError(t, err) + dataCid2 := res.Root + duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay) + cmd := []string{"client", "deal"} + interactiveCmds := []string{ + dataCid2.String(), + duration, + minerAddr.String(), + "no", + "yes", + } + out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds) + fmt.Println("client deal:\n", out) + + // Wait for provider to start sealing deal + dealStatus := "" + for { + // client list-deals + out = clientCLI.RunCmd("client", "list-deals") + fmt.Println("list-deals:\n", out) + + lines := strings.Split(out, "\n") + require.Len(t, lines, 2) + re := regexp.MustCompile(`\s+`) + parts := re.Split(lines[1], -1) + if len(parts) < 4 { + require.Fail(t, "bad list-deals output format") + } + dealStatus = parts[3] + fmt.Println(" Deal status:", dealStatus) + if dealComplete(t, dealStatus) { + break + } + + time.Sleep(time.Second) + } + + // Retrieve the first file from the miner + // client retrieve + tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client") + require.NoError(t, err) + path := filepath.Join(tmpdir, "outfile.dat") + out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) + fmt.Println("retrieve:\n", out) + require.Regexp(t, regexp.MustCompile("Success"), out) +} + +func dealComplete(t *testing.T, dealStatus string) bool { + switch dealStatus { + case "StorageDealFailing", "StorageDealError": + 
t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus)) + case "StorageDealStaged", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": + return true + } + + return false +} diff --git a/cli/test/mockcli.go b/cli/test/mockcli.go new file mode 100644 index 000000000..e8eb78f1b --- /dev/null +++ b/cli/test/mockcli.go @@ -0,0 +1,141 @@ +package test + +import ( + "bytes" + "context" + "flag" + "strings" + "testing" + + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + lcli "github.com/urfave/cli/v2" +) + +type MockCLI struct { + t *testing.T + cmds []*lcli.Command + cctx *lcli.Context + out *bytes.Buffer +} + +func NewMockCLI(ctx context.Context, t *testing.T, cmds []*lcli.Command) *MockCLI { + // Create a CLI App with an --api-url flag so that we can specify which node + // the command should be executed against + app := &lcli.App{ + Flags: []lcli.Flag{ + &lcli.StringFlag{ + Name: "api-url", + Hidden: true, + }, + }, + Commands: cmds, + } + + var out bytes.Buffer + app.Writer = &out + app.Setup() + + cctx := lcli.NewContext(app, &flag.FlagSet{}, nil) + cctx.Context = ctx + return &MockCLI{t: t, cmds: cmds, cctx: cctx, out: &out} +} + +func (c *MockCLI) Client(addr multiaddr.Multiaddr) *MockCLIClient { + return &MockCLIClient{t: c.t, cmds: c.cmds, addr: addr, cctx: c.cctx, out: c.out} +} + +// MockCLIClient runs commands against a particular node +type MockCLIClient struct { + t *testing.T + cmds []*lcli.Command + addr multiaddr.Multiaddr + cctx *lcli.Context + out *bytes.Buffer +} + +func (c *MockCLIClient) RunCmd(input ...string) string { + out, err := c.RunCmdRaw(input...) + require.NoError(c.t, err) + + return out +} + +// Given an input, find the corresponding command or sub-command. +// eg "paych add-funds" +func (c *MockCLIClient) cmdByNameSub(input []string) (*lcli.Command, []string) { + name := input[0] + for _, cmd := range c.cmds { + if cmd.Name == name { + return c.findSubcommand(cmd, input[1:]) + } + } + return nil, []string{} +} + +func (c *MockCLIClient) findSubcommand(cmd *lcli.Command, input []string) (*lcli.Command, []string) { + // If there are no sub-commands, return the current command + if len(cmd.Subcommands) == 0 { + return cmd, input + } + + // Check each sub-command for a match against the name + subName := input[0] + for _, subCmd := range cmd.Subcommands { + if subCmd.Name == subName { + // Found a match, recursively search for sub-commands + return c.findSubcommand(subCmd, input[1:]) + } + } + return nil, []string{} +} + +func (c *MockCLIClient) RunCmdRaw(input ...string) (string, error) { + cmd, input := c.cmdByNameSub(input) + if cmd == nil { + panic("Could not find command " + input[0] + " " + input[1]) + } + + // prepend --api-url= + apiFlag := "--api-url=" + c.addr.String() + input = append([]string{apiFlag}, input...) 
+ + fs := c.flagSet(cmd) + err := fs.Parse(input) + require.NoError(c.t, err) + + err = cmd.Action(lcli.NewContext(c.cctx.App, fs, c.cctx)) + + // Get the output + str := strings.TrimSpace(c.out.String()) + c.out.Reset() + return str, err +} + +func (c *MockCLIClient) flagSet(cmd *lcli.Command) *flag.FlagSet { + // Apply app level flags (so we can process --api-url flag) + fs := &flag.FlagSet{} + for _, f := range c.cctx.App.Flags { + err := f.Apply(fs) + if err != nil { + c.t.Fatal(err) + } + } + // Apply command level flags + for _, f := range cmd.Flags { + err := f.Apply(fs) + if err != nil { + c.t.Fatal(err) + } + } + return fs +} + +func (c *MockCLIClient) RunInteractiveCmd(cmd []string, interactive []string) string { + c.toStdin(strings.Join(interactive, "\n") + "\n") + return c.RunCmd(cmd...) +} + +func (c *MockCLIClient) toStdin(s string) { + c.cctx.App.Metadata["stdin"] = bytes.NewBufferString(s) +} diff --git a/cli/test/multisig.go b/cli/test/multisig.go new file mode 100644 index 000000000..5a60894e6 --- /dev/null +++ b/cli/test/multisig.go @@ -0,0 +1,97 @@ +package test + +import ( + "context" + "fmt" + "regexp" + "strings" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/require" + lcli "github.com/urfave/cli/v2" +) + +func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { + ctx := context.Background() + + // Create mock CLI + mockCLI := NewMockCLI(ctx, t, cmds) + clientCLI := mockCLI.Client(clientNode.ListenAddr) + + // Create some wallets on the node to use for testing multisig + var walletAddrs []address.Address + for i := 0; i < 4; i++ { + addr, err := clientNode.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + + walletAddrs = append(walletAddrs, addr) + + test.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15)) + } + + // Create an msig with three of the addresses and threshold of two sigs + // msig create --required=2 --duration=50 --value=1000attofil + amtAtto := types.NewInt(1000) + threshold := 2 + paramDuration := "--duration=50" + paramRequired := fmt.Sprintf("--required=%d", threshold) + paramValue := fmt.Sprintf("--value=%dattofil", amtAtto) + out := clientCLI.RunCmd( + "msig", "create", + paramRequired, + paramDuration, + paramValue, + walletAddrs[0].String(), + walletAddrs[1].String(), + walletAddrs[2].String(), + ) + fmt.Println(out) + + // Extract msig robust address from output + expCreateOutPrefix := "Created new multisig:" + require.Regexp(t, regexp.MustCompile(expCreateOutPrefix), out) + parts := strings.Split(strings.TrimSpace(strings.Replace(out, expCreateOutPrefix, "", -1)), " ") + require.Len(t, parts, 2) + msigRobustAddr := parts[1] + fmt.Println("msig robust address:", msigRobustAddr) + + // Propose to add a new address to the msig + // msig add-propose --from= + paramFrom := fmt.Sprintf("--from=%s", walletAddrs[0]) + out = clientCLI.RunCmd( + "msig", "add-propose", + paramFrom, + msigRobustAddr, + walletAddrs[3].String(), + ) + fmt.Println(out) + + // msig inspect + out = clientCLI.RunCmd("msig", "inspect", "--vesting", "--decode-params", msigRobustAddr) + fmt.Println(out) + + // Expect correct balance + require.Regexp(t, regexp.MustCompile("Balance: 0.000000000000001 FIL"), out) + // Expect 1 transaction + require.Regexp(t, regexp.MustCompile(`Transactions:\s*1`), out) + // Expect transaction to be "AddSigner" + require.Regexp(t, regexp.MustCompile(`AddSigner`), out) + 
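RunInteractiveCmd above works by stashing the scripted responses under the "stdin" key of the urfave/cli App.Metadata map (see toStdin); an interactive command is then expected to read its prompts from that reader instead of the process's real standard input. A sketch of how an action could honor the injected reader; the helper names and the fallback to os.Stdin are assumptions for illustration, not the exact Lotus implementation:

package cliutil // hypothetical package, for illustration only

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"

	lcli "github.com/urfave/cli/v2"
)

// stdinFor returns the reader an interactive action should prompt from: the
// test harness may have injected one via App.Metadata["stdin"] (as
// MockCLIClient.toStdin does), otherwise fall back to the real stdin.
func stdinFor(cctx *lcli.Context) io.Reader {
	if v, ok := cctx.App.Metadata["stdin"]; ok {
		if r, ok := v.(io.Reader); ok {
			return r
		}
	}
	return os.Stdin
}

// askLine shows how a prompt would consume one scripted answer per line.
func askLine(cctx *lcli.Context, prompt string) (string, error) {
	fmt.Print(prompt)
	line, err := bufio.NewReader(stdinFor(cctx)).ReadString('\n')
	return strings.TrimSpace(line), err
}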
+ // Approve adding the new address + // msig add-approve --from= 0 false + txnID := "0" + paramFrom = fmt.Sprintf("--from=%s", walletAddrs[1]) + out = clientCLI.RunCmd( + "msig", "add-approve", + paramFrom, + msigRobustAddr, + walletAddrs[0].String(), + txnID, + walletAddrs[3].String(), + "false", + ) + fmt.Println(out) +} diff --git a/cli/test/net.go b/cli/test/net.go new file mode 100644 index 000000000..836b81a8f --- /dev/null +++ b/cli/test/net.go @@ -0,0 +1,87 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api/test" + test2 "github.com/filecoin-project/lotus/node/test" +) + +func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) { + n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner) + + full := n[0] + miner := sn[0] + + // Get everyone connected + addrs, err := full.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrs); err != nil { + t.Fatal(err) + } + + // Start mining blocks + bm := test.NewBlockMiner(ctx, t, miner, blocktime) + bm.MineBlocks() + + // Get the full node's wallet address + fullAddr, err := full.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + // Create mock CLI + return full, fullAddr +} + +func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) { + n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner) + + fullNode1 := n[0] + fullNode2 := n[1] + miner := sn[0] + + // Get everyone connected + addrs, err := fullNode1.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := fullNode2.NetConnect(ctx, addrs); err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrs); err != nil { + t.Fatal(err) + } + + // Start mining blocks + bm := test.NewBlockMiner(ctx, t, miner, blocktime) + bm.MineBlocks() + + // Send some funds to register the second node + fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18)) + + // Get the first node's address + fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + // Create mock CLI + return n, []address.Address{fullNodeAddr1, fullNodeAddr2} +} diff --git a/cli/test/util.go b/cli/test/util.go new file mode 100644 index 000000000..e3930dc83 --- /dev/null +++ b/cli/test/util.go @@ -0,0 +1,12 @@ +package test + +import "github.com/ipfs/go-log/v2" + +func QuietMiningLogs() { + _ = log.SetLogLevel("miner", "ERROR") + _ = log.SetLogLevel("chainstore", "ERROR") + _ = log.SetLogLevel("chain", "ERROR") + _ = log.SetLogLevel("sub", "ERROR") + _ = log.SetLogLevel("storageminer", "ERROR") + _ = log.SetLogLevel("pubsub", "ERROR") +} diff --git a/cli/wallet.go b/cli/wallet.go index aa5b9bed3..f6368cbfa 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -13,11 +13,14 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + 
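The net.go helpers above give CLI tests a ready-made network: one or two full nodes connected to a mock-sector miner that mines on a fixed cadence. A hypothetical test showing how they combine with QuietMiningLogs and MockCLI from this change set (the test body is illustrative, not part of the diff):

package test

import (
	"context"
	"strings"
	"testing"
	"time"

	lcli "github.com/filecoin-project/lotus/cli"
)

func TestWalletListOnRunningNode(t *testing.T) {
	QuietMiningLogs() // keep miner/chain log noise out of the CLI output

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// One full node plus one miner, mining a block every 5ms.
	node, defaultAddr := StartOneNodeOneMiner(ctx, t, 5*time.Millisecond)

	// Drive the real `lotus wallet list` command against the node's RPC endpoint.
	clientCLI := NewMockCLI(ctx, t, lcli.Commands).Client(node.ListenAddr)
	out := clientCLI.RunCmd("wallet", "list")
	if !strings.Contains(out, defaultAddr.String()) {
		t.Fatalf("expected default address %s in output:\n%s", defaultAddr, out)
	}
}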
"github.com/filecoin-project/lotus/chain/actors" types "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/lib/tablewriter" ) @@ -35,6 +38,7 @@ var walletCmd = &cli.Command{ walletSign, walletVerify, walletDelete, + walletMarket, }, } @@ -55,7 +59,7 @@ var walletNew = &cli.Command{ t = "secp256k1" } - nk, err := api.WalletNew(ctx, wallet.ActSigType(t)) + nk, err := api.WalletNew(ctx, types.KeyType(t)) if err != nil { return err } @@ -75,6 +79,16 @@ var walletList = &cli.Command{ Usage: "Only print addresses", Aliases: []string{"a"}, }, + &cli.BoolFlag{ + Name: "id", + Usage: "Output ID addresses", + Aliases: []string{"i"}, + }, + &cli.BoolFlag{ + Name: "market", + Usage: "Output market balances", + Aliases: []string{"m"}, + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -94,7 +108,10 @@ var walletList = &cli.Command{ tw := tablewriter.New( tablewriter.Col("Address"), + tablewriter.Col("ID"), tablewriter.Col("Balance"), + tablewriter.Col("Market(Avail)"), + tablewriter.Col("Market(Locked)"), tablewriter.Col("Nonce"), tablewriter.Col("Default"), tablewriter.NewLineCol("Error")) @@ -127,6 +144,23 @@ var walletList = &cli.Command{ row["Default"] = "X" } + if cctx.Bool("id") { + id, err := api.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + row["ID"] = "n/a" + } else { + row["ID"] = id + } + } + + if cctx.Bool("market") { + mbal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err == nil { + row["Market(Avail)"] = types.FIL(types.BigSub(mbal.Escrow, mbal.Locked)) + row["Market(Locked)"] = types.FIL(mbal.Locked) + } + } + tw.Write(row) } } @@ -329,9 +363,9 @@ var walletImport = &cli.Command{ ki.PrivateKey = gk.PrivateKey switch gk.SigType { case 1: - ki.Type = wallet.KTSecp256k1 + ki.Type = types.KTSecp256k1 case 2: - ki.Type = wallet.KTBLS + ki.Type = types.KTBLS default: return fmt.Errorf("unrecognized key type: %d", gk.SigType) } @@ -472,3 +506,107 @@ var walletDelete = &cli.Command{ return api.WalletDelete(ctx, addr) }, } + +var walletMarket = &cli.Command{ + Name: "market", + Usage: "Interact with market balances", + Subcommands: []*cli.Command{ + walletMarketWithdraw, + }, +} + +var walletMarketWithdraw = &cli.Command{ + Name: "withdraw", + Usage: "Withdraw funds from the Storage Market Actor", + ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address", + Aliases: []string{"f"}, + }, + &cli.StringFlag{ + Name: "address", + Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)", + Aliases: []string{"a"}, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting node API: %w", err) + } + defer closer() + ctx := ReqContext(cctx) + + var from address.Address + if cctx.String("from") != "" { + from, err = address.NewFromString(cctx.String("from")) + if err != nil { + return xerrors.Errorf("parsing from address: %w", err) + } + } else { + from, err = api.WalletDefaultAddress(ctx) + if err != nil { + return xerrors.Errorf("getting default wallet address: %w", err) + } + } + + addr := from + if cctx.String("address") != "" { + addr, err = address.NewFromString(cctx.String("address")) + if err != nil { + return xerrors.Errorf("parsing market 
address: %w", err) + } + } + + bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err) + } + + avail := big.Subtract(bal.Escrow, bal.Locked) + amt := avail + + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amt = abi.TokenAmount(f) + } + + if amt.GreaterThan(avail) { + return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail)) + } + + if avail.IsZero() { + return xerrors.Errorf("zero unlocked funds available to withdraw") + } + + params, err := actors.SerializeParams(&market.WithdrawBalanceParams{ + ProviderOrClientAddress: addr, + Amount: amt, + }) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String()) + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + To: builtin.StorageMarketActorAddr, + From: from, + Value: types.NewInt(0), + Method: builtin.MethodsMarket.WithdrawBalance, + Params: params, + }, nil) + if err != nil { + return xerrors.Errorf("submitting WithdrawBalance message: %w", err) + } + + fmt.Printf("WithdrawBalance message cid: %s\n", smsg.Cid()) + + return nil + }, +} diff --git a/cmd/chain-noise/main.go b/cmd/chain-noise/main.go index 7b9824016..81586e1b2 100644 --- a/cmd/chain-noise/main.go +++ b/cmd/chain-noise/main.go @@ -7,8 +7,6 @@ import ( "os" "time" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -61,7 +59,7 @@ var runCmd = &cli.Command{ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error { var sendSet []address.Address for i := 0; i < 20; i++ { - naddr, err := api.WalletNew(ctx, crypto.SigTypeSecp256k1) + naddr, err := api.WalletNew(ctx, types.KTSecp256k1) if err != nil { return err } diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 51ab696f7..5b434c762 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -78,15 +78,16 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } } -func (cv *cachingVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { +func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { return cv.withCache(func() (bool, error) { return cv.backend.VerifySeal(svi) }, &svi) } -func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { + +func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { return cv.backend.VerifyWinningPoSt(ctx, info) } -func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { +func (cv *cachingVerifier) 
VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { return cv.withCache(func() (bool, error) { return cv.backend.VerifyWindowPoSt(ctx, info) }, &info) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 1e6ce4352..a4200c447 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -26,7 +26,9 @@ import ( "github.com/filecoin-project/lotus/lib/blockstore" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" + metricsprometheus "github.com/ipfs/go-metrics-prometheus" "github.com/ipld/go-car" + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -34,6 +36,7 @@ import ( bdg "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" + measure "github.com/ipfs/go-ds-measure" pebbleds "github.com/ipfs/go-ds-pebble" "github.com/urfave/cli/v2" @@ -89,8 +92,12 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-import", }, + &cli.BoolFlag{ + Name: "use-pebble", + }, }, Action: func(cctx *cli.Context) error { + metricsprometheus.Inject() //nolint:errcheck vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") if !cctx.Args().Present() { fmt.Println("must pass car file of chain to benchmark importing") @@ -104,6 +111,7 @@ var importBenchCmd = &cli.Command{ defer cfi.Close() //nolint:errcheck // read only file go func() { + http.Handle("/debug/metrics/prometheus", promhttp.Handler()) http.ListenAndServe("localhost:6060", nil) //nolint:errcheck }() @@ -126,7 +134,7 @@ var importBenchCmd = &cli.Command{ bdgOpt.Options.DetectConflicts = false var bds datastore.Batching - if false { + if cctx.Bool("use-pebble") { cache := 512 bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ // Pebble has a single combined cache area and the write @@ -155,6 +163,8 @@ var importBenchCmd = &cli.Command{ } defer bds.Close() //nolint:errcheck + bds = measure.New("dsbench", bds) + bs := blockstore.NewBlockstore(bds) cacheOpts := blockstore.DefaultCacheOpts() cacheOpts.HasBloomFilterSize = 0 @@ -310,6 +320,21 @@ var importBenchCmd = &cli.Command{ pprof.StopCPUProfile() + if true { + resp, err := http.Get("http://localhost:6060/debug/metrics/prometheus") + if err != nil { + return err + } + + metricsfi, err := os.Create("import-bench.metrics") + if err != nil { + return err + } + + io.Copy(metricsfi, resp.Body) //nolint:errcheck + metricsfi.Close() //nolint:errcheck + } + return nil }, diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index d5708386c..62dd8c3fb 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -11,7 +11,7 @@ import ( "path/filepath" "time" - saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/docker/go-units" logging "github.com/ipfs/go-log/v2" @@ -26,7 +26,7 @@ import ( lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/specs-storage/storage" lapi "github.com/filecoin-project/lotus/api" @@ -269,7 +269,7 @@ var sealBenchCmd = &cli.Command{ 
sectorNumber := c.Int("num-sectors") var sealTimings []SealingResult - var sealedSectors []saproof.SectorInfo + var sealedSectors []saproof2.SectorInfo if robench == "" { var err error @@ -305,7 +305,7 @@ var sealBenchCmd = &cli.Command{ } for _, s := range genm.Sectors { - sealedSectors = append(sealedSectors, saproof.SectorInfo{ + sealedSectors = append(sealedSectors, saproof2.SectorInfo{ SealedCID: s.CommR, SectorNumber: s.SectorID, SealProof: s.ProofType, @@ -339,7 +339,7 @@ var sealBenchCmd = &cli.Command{ return err } - candidates := make([]saproof.SectorInfo, len(fcandidates)) + candidates := make([]saproof2.SectorInfo, len(fcandidates)) for i, fcandidate := range fcandidates { candidates[i] = sealedSectors[fcandidate] } @@ -362,7 +362,7 @@ var sealBenchCmd = &cli.Command{ winnningpost2 := time.Now() - pvi1 := saproof.WinningPoStVerifyInfo{ + pvi1 := saproof2.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof1, ChallengedSectors: candidates, @@ -378,7 +378,7 @@ var sealBenchCmd = &cli.Command{ verifyWinningPost1 := time.Now() - pvi2 := saproof.WinningPoStVerifyInfo{ + pvi2 := saproof2.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof2, ChallengedSectors: candidates, @@ -410,7 +410,7 @@ var sealBenchCmd = &cli.Command{ windowpost2 := time.Now() - wpvi1 := saproof.WindowPoStVerifyInfo{ + wpvi1 := saproof2.WindowPoStVerifyInfo{ Randomness: challenge[:], Proofs: wproof1, ChallengedSectors: sealedSectors, @@ -426,7 +426,7 @@ var sealBenchCmd = &cli.Command{ verifyWindowpost1 := time.Now() - wpvi2 := saproof.WindowPoStVerifyInfo{ + wpvi2 := saproof2.WindowPoStVerifyInfo{ Randomness: challenge[:], Proofs: wproof2, ChallengedSectors: sealedSectors, @@ -498,10 +498,10 @@ type ParCfg struct { Commit int } -func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof.SectorInfo, error) { +func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) { var pieces []abi.PieceInfo sealTimings := make([]SealingResult, numSectors) - sealedSectors := make([]saproof.SectorInfo, numSectors) + sealedSectors := make([]saproof2.SectorInfo, numSectors) preCommit2Sema := make(chan struct{}, par.PreCommit2) commitSema := make(chan struct{}, par.Commit) @@ -570,7 +570,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par precommit2 := time.Now() <-preCommit2Sema - sealedSectors[i] = saproof.SectorInfo{ + sealedSectors[i] = saproof2.SectorInfo{ SealProof: sb.SealProofType(), SectorNumber: i, SealedCID: cids.Sealed, @@ -622,7 +622,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par <-commitSema if !skipc2 { - svi := saproof.SealVerifyInfo{ + svi := saproof2.SealVerifyInfo{ SectorID: sid, SealedCID: cids.Sealed, SealProof: sb.SealProofType(), @@ -647,7 +647,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par if !skipunseal { log.Infof("[%d] Unsealing sector", i) { - p, done, err := sbfs.AcquireSector(context.TODO(), sid, stores.FTUnsealed, stores.FTNone, stores.PathSealing) + p, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) if err != 
nil { return xerrors.Errorf("acquire unsealed sector for removing: %w", err) } diff --git a/cmd/lotus-chainwatch/processor/processor.go b/cmd/lotus-chainwatch/processor/processor.go index bce2b9fb7..1f8b246ed 100644 --- a/cmd/lotus-chainwatch/processor/processor.go +++ b/cmd/lotus-chainwatch/processor/processor.go @@ -15,7 +15,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" @@ -134,10 +134,10 @@ func (p *Processor) Start(ctx context.Context) { log.Fatalw("Failed to collect actor changes", "error", err) } log.Infow("Collected Actor Changes", - "MarketChanges", len(actorChanges[builtin.StorageMarketActorCodeID]), - "MinerChanges", len(actorChanges[builtin.StorageMinerActorCodeID]), - "RewardChanges", len(actorChanges[builtin.RewardActorCodeID]), - "AccountChanges", len(actorChanges[builtin.AccountActorCodeID]), + "MarketChanges", len(actorChanges[builtin2.StorageMarketActorCodeID]), + "MinerChanges", len(actorChanges[builtin2.StorageMinerActorCodeID]), + "RewardChanges", len(actorChanges[builtin2.RewardActorCodeID]), + "AccountChanges", len(actorChanges[builtin2.AccountActorCodeID]), "nullRounds", len(nullRounds)) grp := sync.WaitGroup{} @@ -145,7 +145,7 @@ func (p *Processor) Start(ctx context.Context) { grp.Add(1) go func() { defer grp.Done() - if err := p.HandleMarketChanges(ctx, actorChanges[builtin.StorageMarketActorCodeID]); err != nil { + if err := p.HandleMarketChanges(ctx, actorChanges[builtin2.StorageMarketActorCodeID]); err != nil { log.Errorf("Failed to handle market changes: %w", err) return } @@ -154,7 +154,7 @@ func (p *Processor) Start(ctx context.Context) { grp.Add(1) go func() { defer grp.Done() - if err := p.HandleMinerChanges(ctx, actorChanges[builtin.StorageMinerActorCodeID]); err != nil { + if err := p.HandleMinerChanges(ctx, actorChanges[builtin2.StorageMinerActorCodeID]); err != nil { log.Errorf("Failed to handle miner changes: %w", err) return } @@ -163,7 +163,7 @@ func (p *Processor) Start(ctx context.Context) { grp.Add(1) go func() { defer grp.Done() - if err := p.HandleRewardChanges(ctx, actorChanges[builtin.RewardActorCodeID], nullRounds); err != nil { + if err := p.HandleRewardChanges(ctx, actorChanges[builtin2.RewardActorCodeID], nullRounds); err != nil { log.Errorf("Failed to handle reward changes: %w", err) return } @@ -172,7 +172,7 @@ func (p *Processor) Start(ctx context.Context) { grp.Add(1) go func() { defer grp.Done() - if err := p.HandlePowerChanges(ctx, actorChanges[builtin.StoragePowerActorCodeID]); err != nil { + if err := p.HandlePowerChanges(ctx, actorChanges[builtin2.StoragePowerActorCodeID]); err != nil { log.Errorf("Failed to handle power actor changes: %w", err) return } diff --git a/cmd/lotus-chainwatch/syncer/sync.go b/cmd/lotus-chainwatch/syncer/sync.go index 609b71088..37af9cce0 100644 --- a/cmd/lotus-chainwatch/syncer/sync.go +++ b/cmd/lotus-chainwatch/syncer/sync.go @@ -316,7 +316,7 @@ limit 1 } func (s *Syncer) storeCirculatingSupply(ctx context.Context, tipset *types.TipSet) error { - supply, err := s.node.StateCirculatingSupply(ctx, tipset.Key()) + supply, err := s.node.StateVMCirculatingSupplyInternal(ctx, tipset.Key()) if err != nil { return err } diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go index d5fac0a06..875eaac7d 100644 --- 
a/cmd/lotus-gateway/api.go +++ b/cmd/lotus-gateway/api.go @@ -6,17 +6,25 @@ import ( "time" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" "github.com/filecoin-project/lotus/node/impl/full" "github.com/ipfs/go-cid" ) const ( - LookbackCap = time.Hour - stateWaitLookbackLimit = abi.ChainEpoch(20) + LookbackCap = time.Hour * 24 + StateWaitLookbackLimit = abi.ChainEpoch(20) ) var ( @@ -26,21 +34,57 @@ var ( // gatewayDepsAPI defines the API methods that the GatewayAPI depends on // (to make it easy to mock for tests) type gatewayDepsAPI interface { - ChainHead(ctx context.Context) (*types.TipSet, error) + Version(context.Context) (api.Version, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) + ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) + ChainHead(ctx context.Context) (*types.TipSet, error) + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) + StateMinerPower(context.Context, 
address.Address, types.TipSetKey) (*api.MinerPower, error) + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) + StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) } type GatewayAPI struct { - api gatewayDepsAPI + api gatewayDepsAPI + lookbackCap time.Duration + stateWaitLookbackLimit abi.ChainEpoch +} + +// NewGatewayAPI creates a new GatewayAPI with the default lookback cap +func NewGatewayAPI(api gatewayDepsAPI) *GatewayAPI { + return newGatewayAPI(api, LookbackCap, StateWaitLookbackLimit) +} + +// used by the tests +func newGatewayAPI(api gatewayDepsAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *GatewayAPI { + return &GatewayAPI{api: api, lookbackCap: lookbackCap, stateWaitLookbackLimit: stateWaitLookbackLimit} } func (a *GatewayAPI) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error { @@ -76,27 +120,53 @@ func (a *GatewayAPI) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error } func (a *GatewayAPI) checkTimestamp(at time.Time) error { - if time.Since(at) > LookbackCap { + if time.Since(at) > a.lookbackCap { return ErrLookbackTooLong } return nil } +func (a *GatewayAPI) Version(ctx context.Context) (api.Version, error) { + return a.api.Version(ctx) +} + +func (a *GatewayAPI) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { + return a.api.ChainGetBlockMessages(ctx, c) +} + +func (a *GatewayAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return a.api.ChainHasObj(ctx, c) +} + func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify) return a.api.ChainHead(ctx) } +func (a *GatewayAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + return a.api.ChainGetMessage(ctx, mc) +} + func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { return a.api.ChainGetTipSet(ctx, tsk) } func (a *GatewayAPI) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := a.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return nil, err + var ts *types.TipSet + if tsk.IsEmpty() { + head, err := a.api.ChainHead(ctx) + if err != nil { + return nil, err + } + ts = head + } else { + gts, err := a.api.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + ts = gts } // Check if the tipset key refers to a tipset that's too far in the past @@ -112,6 +182,18 @@ func (a *GatewayAPI) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoc return a.api.ChainGetTipSetByHeight(ctx, h, tsk) } +func (a *GatewayAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { + return 
a.api.ChainGetNode(ctx, p) +} + +func (a *GatewayAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { + return a.api.ChainNotify(ctx) +} + +func (a *GatewayAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + return a.api.ChainReadObj(ctx, c) +} + func (a *GatewayAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { if err := a.checkTipsetKey(ctx, tsk); err != nil { return nil, err @@ -152,6 +234,14 @@ func (a *GatewayAPI) StateAccountKey(ctx context.Context, addr address.Address, return a.api.StateAccountKey(ctx, addr, tsk) } +func (a *GatewayAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return api.DealCollateralBounds{}, err + } + + return a.api.StateDealProviderCollateralBounds(ctx, size, verified, tsk) +} + func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { if err := a.checkTipsetKey(ctx, tsk); err != nil { return nil, err @@ -160,6 +250,22 @@ func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, t return a.api.StateGetActor(ctx, actor, tsk) } +func (a *GatewayAPI) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return a.api.StateGetReceipt(ctx, c, tsk) +} + +func (a *GatewayAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return a.api.StateListMiners(ctx, tsk) +} + func (a *GatewayAPI) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { if err := a.checkTipsetKey(ctx, tsk); err != nil { return address.Undef, err @@ -168,8 +274,113 @@ func (a *GatewayAPI) StateLookupID(ctx context.Context, addr address.Address, ts return a.api.StateLookupID(ctx, addr, tsk) } +func (a *GatewayAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return api.MarketBalance{}, err + } + + return a.api.StateMarketBalance(ctx, addr, tsk) +} + +func (a *GatewayAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return a.api.StateMarketStorageDeal(ctx, dealId, tsk) +} + +func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return network.VersionMax, err + } + + return a.api.StateNetworkVersion(ctx, tsk) +} + func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return a.api.StateWaitMsgLimited(ctx, msg, confidence, stateWaitLookbackLimit) + return a.api.StateWaitMsgLimited(ctx, msg, confidence, a.stateWaitLookbackLimit) +} + +func (a *GatewayAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return a.api.StateReadState(ctx, actor, tsk) +} + +func (a *GatewayAPI) 
StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return a.api.StateMinerPower(ctx, m, tsk) +} + +func (a *GatewayAPI) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return a.api.StateMinerFaults(ctx, m, tsk) +} +func (a *GatewayAPI) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return a.api.StateMinerRecoveries(ctx, m, tsk) +} + +func (a *GatewayAPI) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return miner.MinerInfo{}, err + } + return a.api.StateMinerInfo(ctx, m, tsk) +} + +func (a *GatewayAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return a.api.StateMinerDeadlines(ctx, m, tsk) +} + +func (a *GatewayAPI) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return a.api.StateMinerAvailableBalance(ctx, m, tsk) +} + +func (a *GatewayAPI) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) (*dline.Info, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return a.api.StateMinerProvingDeadline(ctx, m, tsk) +} + +func (a *GatewayAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return a.api.StateCirculatingSupply(ctx, tsk) + +} + +func (a *GatewayAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return a.api.StateVerifiedClientStatus(ctx, addr, tsk) +} + +func (a *GatewayAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + if err := a.checkTipsetKey(ctx, tsk); err != nil { + return api.CirculatingSupply{}, err + } + return a.api.StateVMCirculatingSupplyInternal(ctx, tsk) +} + +func (a *GatewayAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { + return sigs.Verify(sig, k, msg) == nil, nil } var _ api.GatewayAPI = (*GatewayAPI)(nil) diff --git a/cmd/lotus-gateway/api_test.go b/cmd/lotus-gateway/api_test.go index f34f887f5..23d2cbf3a 100644 --- a/cmd/lotus-gateway/api_test.go +++ b/cmd/lotus-gateway/api_test.go @@ -6,6 +6,9 @@ import ( "testing" "time" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/build" "github.com/stretchr/testify/require" @@ -88,7 +91,7 @@ func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { mock := &mockGatewayDepsAPI{} - a := &GatewayAPI{api: mock} + a := NewGatewayAPI(mock) // Create tipsets from genesis up to tskh and 
return the highest ts := mock.createTipSets(tt.args.tskh, tt.args.genesisTS) @@ -107,6 +110,45 @@ func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { type mockGatewayDepsAPI struct { lk sync.RWMutex tipsets []*types.TipSet + + gatewayDepsAPI // satisfies all interface requirements but will panic if + // methods are called. easier than filling out with panic stubs IMO +} + +func (m *mockGatewayDepsAPI) ChainHasObj(context.Context, cid.Cid) (bool, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + panic("implement me") +} + +func (m *mockGatewayDepsAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) { + panic("implement me") } func (m *mockGatewayDepsAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { @@ -189,3 +231,7 @@ func (m *mockGatewayDepsAPI) StateLookupID(ctx context.Context, addr address.Add func (m *mockGatewayDepsAPI) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error) { panic("implement me") } + +func (m *mockGatewayDepsAPI) StateReadState(ctx context.Context, act address.Address, ts types.TipSetKey) (*api.ActorState, error) { + panic("implement me") +} diff --git a/cmd/lotus-gateway/endtoend_test.go b/cmd/lotus-gateway/endtoend_test.go index 206034968..1e1e5e229 100644 --- a/cmd/lotus-gateway/endtoend_test.go +++ b/cmd/lotus-gateway/endtoend_test.go @@ -4,12 +4,16 @@ import ( "bytes" "context" "fmt" + "math" "os" "testing" "time" - init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + "github.com/filecoin-project/lotus/cli" + clitest "github.com/filecoin-project/lotus/cli/test" + + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" "github.com/stretchr/testify/require" "golang.org/x/xerrors" @@ -21,27 +25,34 @@ import ( "github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/node" builder "github.com/filecoin-project/lotus/node/test" ) +const maxLookbackCap = time.Duration(math.MaxInt64) +const maxStateWaitLookbackLimit = stmgr.LookbackNoLimit + 
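mockGatewayDepsAPI above only implements the methods the test actually calls; embedding the gatewayDepsAPI interface makes the struct satisfy the whole interface anyway, and any method that is never overridden panics (nil embedded interface) instead of silently returning zero values. A tiny self-contained illustration of that idiom:

package main

import "fmt"

type store interface {
	Get(key string) (string, error)
	Put(key, value string) error
}

// mockStore embeds the interface it mocks: it compiles against the full
// store interface, but calling a method we never override panics because
// the embedded value is nil.
type mockStore struct {
	store
}

func (m *mockStore) Get(key string) (string, error) { return "stub-" + key, nil }

func main() {
	var s store = &mockStore{}
	v, _ := s.Get("head")
	fmt.Println(v) // "stub-head"
	// s.Put("k", "v") would panic: Put was never overridden.
}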
func init() { policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) } -// TestEndToEnd tests that API calls can be made on a lite node that is -// connected through a gateway to a full API node -func TestEndToEnd(t *testing.T) { +// TestWalletMsig tests that API calls to wallet and msig can be made on a lite +// node that is connected through a gateway to a full API node +func TestWalletMsig(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - full, lite, closer := startNodes(ctx, t, blocktime) - defer closer() + nodes := startNodes(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) + defer nodes.closer() + + lite := nodes.lite + full := nodes.full // The full node starts with a wallet fullWalletAddr, err := full.WalletDefaultAddress(ctx) @@ -53,15 +64,15 @@ func TestEndToEnd(t *testing.T) { fmt.Println(balance) // Create a wallet on the lite node - liteWalletAddr, err := lite.WalletNew(ctx, wallet.ActSigType("secp256k1")) + liteWalletAddr, err := lite.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) // Send some funds from the full node to the lite node - err = sendFunds(ctx, t, full, fullWalletAddr, liteWalletAddr, types.NewInt(1e18)) + err = sendFunds(ctx, full, fullWalletAddr, liteWalletAddr, types.NewInt(1e18)) require.NoError(t, err) // Send some funds from the lite node back to the full node - err = sendFunds(ctx, t, lite, liteWalletAddr, fullWalletAddr, types.NewInt(100)) + err = sendFunds(ctx, lite, liteWalletAddr, fullWalletAddr, types.NewInt(100)) require.NoError(t, err) // Sign some data with the lite node wallet address @@ -77,12 +88,12 @@ func TestEndToEnd(t *testing.T) { // Create some wallets on the lite node to use for testing multisig var walletAddrs []address.Address for i := 0; i < 4; i++ { - addr, err := lite.WalletNew(ctx, wallet.ActSigType("secp256k1")) + addr, err := lite.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) walletAddrs = append(walletAddrs, addr) - err = sendFunds(ctx, t, lite, liteWalletAddr, addr, types.NewInt(1e15)) + err = sendFunds(ctx, lite, liteWalletAddr, addr, types.NewInt(1e15)) require.NoError(t, err) } @@ -96,7 +107,7 @@ func TestEndToEnd(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) - var execReturn init0.ExecReturn + var execReturn init2.ExecReturn err = execReturn.UnmarshalCBOR(bytes.NewReader(res.Receipt.Return)) require.NoError(t, err) @@ -116,7 +127,7 @@ func TestEndToEnd(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) - var proposeReturn multisig.ProposeReturn + var proposeReturn multisig2.ProposeReturn err = proposeReturn.UnmarshalCBOR(bytes.NewReader(res.Receipt.Return)) require.NoError(t, err) @@ -130,36 +141,89 @@ func TestEndToEnd(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) - var approveReturn multisig.ApproveReturn + var approveReturn multisig2.ApproveReturn err = approveReturn.UnmarshalCBOR(bytes.NewReader(res.Receipt.Return)) require.NoError(t, err) require.True(t, approveReturn.Applied) } -func sendFunds(ctx context.Context, t *testing.T, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { - msg := &types.Message{ - From: fromAddr, - To: toAddr, - Value: amt, - } +// TestMsigCLI tests that msig CLI calls 
can be made +// on a lite node that is connected through a gateway to a full API node +func TestMsigCLI(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() - sm, err := fromNode.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return err - } + blocktime := 5 * time.Millisecond + ctx := context.Background() + nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) + defer nodes.closer() - res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 1) - if err != nil { - return err - } - if res.Receipt.ExitCode != 0 { - return xerrors.Errorf("send funds failed with exit code %d", res.Receipt.ExitCode) - } - - return nil + lite := nodes.lite + clitest.RunMultisigTest(t, cli.Commands, lite) } -func startNodes(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, test.TestNode, jsonrpc.ClientCloser) { +func TestDealFlow(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() + + blocktime := 5 * time.Millisecond + ctx := context.Background() + nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) + defer nodes.closer() + + test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false) +} + +func TestCLIDealFlow(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + clitest.QuietMiningLogs() + + blocktime := 5 * time.Millisecond + ctx := context.Background() + nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) + defer nodes.closer() + + clitest.RunClientTest(t, cli.Commands, nodes.lite) +} + +type testNodes struct { + lite test.TestNode + full test.TestNode + miner test.TestStorageNode + closer jsonrpc.ClientCloser +} + +func startNodesWithFunds( + ctx context.Context, + t *testing.T, + blocktime time.Duration, + lookbackCap time.Duration, + stateWaitLookbackLimit abi.ChainEpoch, +) *testNodes { + nodes := startNodes(ctx, t, blocktime, lookbackCap, stateWaitLookbackLimit) + + // The full node starts with a wallet + fullWalletAddr, err := nodes.full.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Create a wallet on the lite node + liteWalletAddr, err := nodes.lite.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + + // Send some funds from the full node to the lite node + err = sendFunds(ctx, nodes.full, fullWalletAddr, liteWalletAddr, types.NewInt(1e18)) + require.NoError(t, err) + + return nodes +} + +func startNodes( + ctx context.Context, + t *testing.T, + blocktime time.Duration, + lookbackCap time.Duration, + stateWaitLookbackLimit abi.ChainEpoch, +) *testNodes { var closer jsonrpc.ClientCloser // Create one miner and two full nodes. 
@@ -176,7 +240,8 @@ func startNodes(ctx context.Context, t *testing.T, blocktime time.Duration) (tes fullNode := nodes[0] // Create a gateway server in front of the full node - _, addr, err := builder.CreateRPCServer(&GatewayAPI{api: fullNode}) + gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit) + _, addr, err := builder.CreateRPCServer(gapiImpl) require.NoError(t, err) // Create a gateway client API that connects to the gateway server @@ -203,9 +268,39 @@ func startNodes(ctx context.Context, t *testing.T, blocktime time.Duration) (tes err = miner.NetConnect(ctx, fullAddr) require.NoError(t, err) + // Connect the miner and the lite node (so that the lite node can send + // data to the miner) + liteAddr, err := lite.NetAddrsListen(ctx) + require.NoError(t, err) + err = miner.NetConnect(ctx, liteAddr) + require.NoError(t, err) + // Start mining blocks bm := test.NewBlockMiner(ctx, t, miner, blocktime) bm.MineBlocks() - return full, lite, closer + return &testNodes{lite: lite, full: full, miner: miner, closer: closer} +} + +func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { + msg := &types.Message{ + From: fromAddr, + To: toAddr, + Value: amt, + } + + sm, err := fromNode.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 1) + if err != nil { + return err + } + if res.Receipt.ExitCode != 0 { + return xerrors.Errorf("send funds failed with exit code %d", res.Receipt.ExitCode) + } + + return nil } diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index c19599084..3fed88468 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -7,10 +7,15 @@ import ( "os" "github.com/filecoin-project/go-jsonrpc" + "go.opencensus.io/tag" + "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/metrics" + logging "github.com/ipfs/go-log" + "go.opencensus.io/stats/view" "github.com/gorilla/mux" "github.com/urfave/cli/v2" @@ -64,6 +69,13 @@ var runCmd = &cli.Command{ ctx, cancel := context.WithCancel(ctx) defer cancel() + // Register all metric views + if err := view.Register( + metrics.DefaultViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -76,7 +88,7 @@ var runCmd = &cli.Command{ log.Info("Setting up API endpoint at " + address) rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", &GatewayAPI{api: api}) + rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api))) mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/").Handler(http.DefaultServeMux) @@ -89,6 +101,7 @@ var runCmd = &cli.Command{ srv := &http.Server{ Handler: mux, BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway")) return ctx }, } diff --git a/cmd/lotus-keygen/main.go b/cmd/lotus-keygen/main.go index 4b971cf48..d296cb5da 100644 --- a/cmd/lotus-keygen/main.go +++ b/cmd/lotus-keygen/main.go @@ -5,7 +5,7 @@ import ( "fmt" "os" - "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" @@ -30,12 
+30,12 @@ func main() { return err } - var kt crypto.SigType + var kt types.KeyType switch cctx.String("type") { case "bls": - kt = crypto.SigTypeBLS + kt = types.KTBLS case "secp256k1": - kt = crypto.SigTypeSecp256k1 + kt = types.KTSecp256k1 default: return fmt.Errorf("unrecognized key type: %q", cctx.String("type")) } diff --git a/cmd/lotus-pcr/main.go b/cmd/lotus-pcr/main.go index 5491e4af2..8ee79b44a 100644 --- a/cmd/lotus-pcr/main.go +++ b/cmd/lotus-pcr/main.go @@ -6,19 +6,19 @@ import ( "context" "encoding/csv" "fmt" + "io" "io/ioutil" "net/http" _ "net/http/pprof" "os" "path/filepath" "strconv" + "strings" "time" "github.com/filecoin-project/lotus/chain/actors/builtin" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" "github.com/filecoin-project/go-state-types/network" @@ -251,6 +251,15 @@ var recoverMinersCmd = &cli.Command{ } defer closer() + r, err := NewRepo(cctx.String("repo")) + if err != nil { + return err + } + + if err := r.Open(); err != nil { + return err + } + from, err := address.NewFromString(cctx.String("from")) if err != nil { return xerrors.Errorf("parsing source address (provide correct --from flag!): %w", err) @@ -267,6 +276,12 @@ var recoverMinersCmd = &cli.Command{ minerRecoveryCutoff := uint64(cctx.Int("miner-recovery-cutoff")) minerRecoveryBonus := uint64(cctx.Int("miner-recovery-bonus")) + blockmap := make(map[address.Address]struct{}) + + for _, addr := range r.Blocklist() { + blockmap[addr] = struct{}{} + } + rf := &refunder{ api: api, wallet: from, @@ -274,6 +289,7 @@ var recoverMinersCmd = &cli.Command{ minerRecoveryRefundPercent: minerRecoveryRefundPercent, minerRecoveryCutoff: types.FromFil(minerRecoveryCutoff), minerRecoveryBonus: types.FromFil(minerRecoveryBonus), + blockmap: blockmap, } refundTipset, err := api.ChainHead(ctx) @@ -466,6 +482,12 @@ var runCmd = &cli.Command{ return err } + blockmap := make(map[address.Address]struct{}) + + for _, addr := range r.Blocklist() { + blockmap[addr] = struct{}{} + } + rf := &refunder{ api: api, wallet: from, @@ -480,13 +502,18 @@ var runCmd = &cli.Command{ publishStorageDealsEnabled: publishStorageDealsEnabled, preFeeCapMax: types.BigInt(preFeeCapMax), proveFeeCapMax: types.BigInt(proveFeeCapMax), + blockmap: blockmap, } - var refunds *MinersRefund = NewMinersRefund() - var rounds int = 0 + var refunds = NewMinersRefund() + var rounds = 0 nextMinerRecovery := r.MinerRecoveryHeight() + minerRecoveryPeriod for tipset := range tipsetsCh { + for k := range rf.blockmap { + fmt.Printf("%s\n", k) + } + refunds, err = rf.ProcessTipset(ctx, tipset, refunds) if err != nil { return err @@ -634,6 +661,7 @@ type refunder struct { windowedPoStEnabled bool publishStorageDealsEnabled bool threshold big.Int + blockmap map[address.Address]struct{} preFeeCapMax big.Int proveFeeCapMax big.Int @@ -738,6 +766,11 @@ func (r *refunder) EnsureMinerMinimums(ctx context.Context, tipset *types.TipSet } for _, maddr := range miners { + if _, found := r.blockmap[maddr]; found { + log.Debugw("skipping blocked miner", "height", tipset.Height(), "key", tipset.Key(), "miner", maddr) + continue + } + mact, err := r.api.StateGetActor(ctx, maddr, types.EmptyTSK) if err != nil { log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr) @@ -871,7 +904,7 @@ func (r *refunder) processTipsetStorageMarketActor(ctx context.Context, tipset * var 
messageMethod string switch m.Method { - case builtin0.MethodsMarket.PublishStorageDeals: + case market.Methods.PublishStorageDeals: if !r.publishStorageDealsEnabled { return false, messageMethod, types.NewInt(0), nil } @@ -884,6 +917,8 @@ func (r *refunder) processTipsetStorageMarketActor(ctx context.Context, tipset * } refundValue = types.BigMul(types.NewInt(uint64(recp.GasUsed)), tipset.Blocks()[0].ParentBaseFee) + default: + return false, messageMethod, types.NewInt(0), nil } return true, messageMethod, refundValue, nil @@ -895,8 +930,13 @@ func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *t refundValue := types.NewInt(0) var messageMethod string + if _, found := r.blockmap[m.To]; found { + log.Debugw("skipping blocked miner", "height", tipset.Height(), "key", tipset.Key(), "miner", m.To) + return false, messageMethod, types.NewInt(0), nil + } + switch m.Method { - case builtin0.MethodsMiner.SubmitWindowedPoSt: + case miner.Methods.SubmitWindowedPoSt: if !r.windowedPoStEnabled { return false, messageMethod, types.NewInt(0), nil } @@ -909,7 +949,7 @@ func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *t } refundValue = types.BigMul(types.NewInt(uint64(recp.GasUsed)), tipset.Blocks()[0].ParentBaseFee) - case builtin0.MethodsMiner.ProveCommitSector: + case miner.Methods.ProveCommitSector: if !r.proveCommitEnabled { return false, messageMethod, types.NewInt(0), nil } @@ -926,9 +966,14 @@ func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *t return false, messageMethod, types.NewInt(0), nil } + if tipset.Blocks()[0].ParentBaseFee.GreaterThan(r.proveFeeCapMax) { + log.Debugw("skipping high base fee message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "basefee", tipset.Blocks()[0].ParentBaseFee, "fee_cap_max", r.proveFeeCapMax) + return false, messageMethod, types.NewInt(0), nil + } + var sn abi.SectorNumber - var proveCommitSector miner0.ProveCommitSectorParams + var proveCommitSector miner2.ProveCommitSectorParams if err := proveCommitSector.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil { log.Warnw("failed to decode provecommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To) return false, messageMethod, types.NewInt(0), nil @@ -965,7 +1010,7 @@ func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *t if r.refundPercent > 0 { refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.refundPercent))) } - case builtin0.MethodsMiner.PreCommitSector: + case miner.Methods.PreCommitSector: if !r.preCommitEnabled { return false, messageMethod, types.NewInt(0), nil } @@ -982,6 +1027,11 @@ func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *t return false, messageMethod, types.NewInt(0), nil } + if tipset.Blocks()[0].ParentBaseFee.GreaterThan(r.preFeeCapMax) { + log.Debugw("skipping high base fee message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "basefee", tipset.Blocks()[0].ParentBaseFee, "fee_cap_max", r.preFeeCapMax) + return false, messageMethod, types.NewInt(0), nil + } + var precommitInfo miner.SectorPreCommitInfo if err := precommitInfo.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil { log.Warnw("failed to decode precommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To) @@ -1165,6 +1215,7 @@ type Repo struct { lastHeight abi.ChainEpoch lastMinerRecoveryHeight abi.ChainEpoch path string + blocklist []address.Address } func 
NewRepo(path string) (*Repo, error) { @@ -1220,6 +1271,10 @@ func (r *Repo) Open() error { return err } + if err := r.loadBlockList(); err != nil { + return err + } + return nil } @@ -1245,6 +1300,51 @@ func loadChainEpoch(fn string) (abi.ChainEpoch, error) { return abi.ChainEpoch(height), nil } +func (r *Repo) loadBlockList() error { + var err error + fpath := filepath.Join(r.path, "blocklist") + f, err := os.OpenFile(fpath, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return err + } + defer func() { + err = f.Close() + }() + + blocklist := []address.Address{} + input := bufio.NewReader(f) + for { + stra, errR := input.ReadString('\n') + stra = strings.TrimSpace(stra) + + if len(stra) == 0 { + if errR == io.EOF { + break + } + continue + } + + addr, err := address.NewFromString(stra) + if err != nil { + return err + } + + blocklist = append(blocklist, addr) + + if errR != nil && errR != io.EOF { + return err + } + + if errR == io.EOF { + break + } + } + + r.blocklist = blocklist + + return nil +} + func (r *Repo) loadHeight() error { var err error r.lastHeight, err = loadChainEpoch(filepath.Join(r.path, "height")) @@ -1257,6 +1357,10 @@ func (r *Repo) loadMinerRecoveryHeight() error { return err } +func (r *Repo) Blocklist() []address.Address { + return r.blocklist +} + func (r *Repo) Height() abi.ChainEpoch { return r.lastHeight } diff --git a/cmd/lotus-seal-worker/cli.go b/cmd/lotus-seal-worker/cli.go new file mode 100644 index 000000000..b1501fca7 --- /dev/null +++ b/cmd/lotus-seal-worker/cli.go @@ -0,0 +1,51 @@ +package main + +import ( + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var setCmd = &cli.Command{ + Name: "set", + Usage: "Manage worker settings", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "enabled", + Usage: "enable/disable new task processing", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetWorkerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + if err := api.SetEnabled(ctx, cctx.Bool("enabled")); err != nil { + return xerrors.Errorf("SetEnabled: %w", err) + } + + return nil + }, +} + +var waitQuietCmd = &cli.Command{ + Name: "wait-quiet", + Usage: "Block until all running tasks exit", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetWorkerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + return api.WaitQuiet(ctx) + }, +} diff --git a/cmd/lotus-seal-worker/info.go b/cmd/lotus-seal-worker/info.go index 9b08a0c80..3388d8a59 100644 --- a/cmd/lotus-seal-worker/info.go +++ b/cmd/lotus-seal-worker/info.go @@ -32,6 +32,18 @@ var infoCmd = &cli.Command{ cli.VersionPrinter(cctx) fmt.Println() + sess, err := api.ProcessSession(ctx) + if err != nil { + return xerrors.Errorf("getting session: %w", err) + } + fmt.Printf("Session: %s\n", sess) + + enabled, err := api.Enabled(ctx) + if err != nil { + return xerrors.Errorf("checking worker status: %w", err) + } + fmt.Printf("Enabled: %t", enabled) + info, err := api.Info(ctx) if err != nil { return xerrors.Errorf("getting info: %w", err) diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index d2c57e680..520964867 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -10,19 +10,22 @@ import ( "os" "path/filepath" "strings" - "syscall" "time" "github.com/google/uuid" "github.com/gorilla/mux" + "github.com/ipfs/go-datastore/namespace" logging 
"github.com/ipfs/go-log/v2" manet "github.com/multiformats/go-multiaddr/net" "github.com/urfave/cli/v2" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc/auth" paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/apistruct" @@ -34,6 +37,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/rpcenc" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/repo" ) @@ -53,6 +58,8 @@ func main() { runCmd, infoCmd, storageCmd, + setCmd, + waitQuietCmd, } app := &cli.App{ @@ -170,15 +177,18 @@ var runCmd = &cli.Command{ } // Connect to storage-miner + ctx := lcli.ReqContext(cctx) + var nodeApi api.StorageMiner var closer func() var err error for { - nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, - jsonrpc.WithNoReconnect(), - jsonrpc.WithTimeout(30*time.Second)) + nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, lcli.StorageMinerUseHttp) if err == nil { - break + _, err = nodeApi.Version(ctx) + if err == nil { + break + } } fmt.Printf("\r\x1b[0KConnecting to miner API... (%s)", err) time.Sleep(time.Second) @@ -186,10 +196,16 @@ var runCmd = &cli.Command{ } defer closer() - ctx := lcli.ReqContext(cctx) ctx, cancel := context.WithCancel(ctx) defer cancel() + // Register all metric views + if err := view.Register( + metrics.DefaultViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + v, err := nodeApi.Version(ctx) if err != nil { return err @@ -199,8 +215,6 @@ var runCmd = &cli.Command{ } log.Infof("Remote version %s", v) - watchMinerConn(ctx, cctx, nodeApi) - // Check params act, err := nodeApi.ActorAddress(ctx) @@ -308,6 +322,15 @@ var runCmd = &cli.Command{ if err != nil { return err } + defer func() { + if err := lr.Close(); err != nil { + log.Error("closing repo", err) + } + }() + ds, err := lr.Datastore("/metadata") + if err != nil { + return err + } log.Info("Opening local storage; connecting to master") const unspecifiedAddress = "0.0.0.0" @@ -347,12 +370,14 @@ var runCmd = &cli.Command{ // Create / expose the worker + wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix)) + workerApi := &worker{ LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{ SealProof: spt, TaskTypes: taskTypes, NoSwap: cctx.Bool("no-swap"), - }, remote, localStore, nodeApi), + }, remote, localStore, nodeApi, nodeApi, wsts), localStore: localStore, ls: lr, } @@ -363,7 +388,7 @@ var runCmd = &cli.Command{ readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() rpcServer := jsonrpc.NewServer(readerServerOpt) - rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(workerApi)) + rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(metrics.MetricedWorkerAPI(workerApi))) mux.Handle("/rpc/v0", rpcServer) mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) @@ -378,6 +403,7 @@ var runCmd = &cli.Command{ srv := &http.Server{ Handler: ah, BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) return ctx }, } @@ -422,13 +448,80 @@ var runCmd = &cli.Command{ } } - log.Info("Waiting for tasks") + minerSession, err 
:= nodeApi.Session(ctx) + if err != nil { + return xerrors.Errorf("getting miner session: %w", err) + } + + waitQuietCh := func() chan struct{} { + out := make(chan struct{}) + go func() { + workerApi.LocalWorker.WaitQuiet() + close(out) + }() + return out + } go func() { - if err := nodeApi.WorkerConnect(ctx, "ws://"+address+"/rpc/v0"); err != nil { - log.Errorf("Registering worker failed: %+v", err) - cancel() - return + heartbeats := time.NewTicker(stores.HeartbeatInterval) + defer heartbeats.Stop() + + var redeclareStorage bool + var readyCh chan struct{} + for { + // If we're reconnecting, redeclare storage first + if redeclareStorage { + log.Info("Redeclaring local storage") + + if err := localStore.Redeclare(ctx); err != nil { + log.Errorf("Redeclaring local storage failed: %+v", err) + + select { + case <-ctx.Done(): + return // graceful shutdown + case <-heartbeats.C: + } + continue + } + } + + // TODO: we could get rid of this, but that requires tracking resources for restarted tasks correctly + if readyCh == nil { + log.Info("Making sure no local tasks are running") + readyCh = waitQuietCh() + } + + for { + curSession, err := nodeApi.Session(ctx) + if err != nil { + log.Errorf("heartbeat: checking remote session failed: %+v", err) + } else { + if curSession != minerSession { + minerSession = curSession + break + } + } + + select { + case <-readyCh: + if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil { + log.Errorf("Registering worker failed: %+v", err) + cancel() + return + } + + log.Info("Worker registered successfully, waiting for tasks") + + readyCh = nil + case <-heartbeats.C: + case <-ctx.Done(): + return // graceful shutdown + } + } + + log.Errorf("LOTUS-MINER CONNECTION LOST") + + redeclareStorage = true } }() @@ -436,55 +529,6 @@ var runCmd = &cli.Command{ }, } -func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageMiner) { - go func() { - closing, err := nodeApi.Closing(ctx) - if err != nil { - log.Errorf("failed to get remote closing channel: %+v", err) - } - - select { - case <-closing: - case <-ctx.Done(): - } - - if ctx.Err() != nil { - return // graceful shutdown - } - - log.Warnf("Connection with miner node lost, restarting") - - exe, err := os.Executable() - if err != nil { - log.Errorf("getting executable for auto-restart: %+v", err) - } - - _ = log.Sync() - - // TODO: there are probably cleaner/more graceful ways to restart, - // but this is good enough for now (FSM can recover from the mess this creates) - //nolint:gosec - if err := syscall.Exec(exe, []string{exe, - fmt.Sprintf("--worker-repo=%s", cctx.String("worker-repo")), - fmt.Sprintf("--miner-repo=%s", cctx.String("miner-repo")), - fmt.Sprintf("--enable-gpu-proving=%t", cctx.Bool("enable-gpu-proving")), - "run", - fmt.Sprintf("--listen=%s", cctx.String("listen")), - fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")), - fmt.Sprintf("--no-swap=%t", cctx.Bool("no-swap")), - fmt.Sprintf("--addpiece=%t", cctx.Bool("addpiece")), - fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")), - fmt.Sprintf("--unseal=%t", cctx.Bool("unseal")), - fmt.Sprintf("--precommit2=%t", cctx.Bool("precommit2")), - fmt.Sprintf("--commit=%t", cctx.Bool("commit")), - fmt.Sprintf("--parallel-fetch-limit=%d", cctx.Int("parallel-fetch-limit")), - fmt.Sprintf("--timeout=%s", cctx.String("timeout")), - }, os.Environ()); err != nil { - fmt.Println(err) - } - }() -} - func extractRoutableIP(timeout time.Duration) (string, error) { minerMultiAddrKey := "MINER_API_INFO" 
deprecatedMinerMultiAddrKey := "STORAGE_API_INFO" diff --git a/cmd/lotus-seal-worker/rpc.go b/cmd/lotus-seal-worker/rpc.go index 8aa9093c2..f4e8494d0 100644 --- a/cmd/lotus-seal-worker/rpc.go +++ b/cmd/lotus-seal-worker/rpc.go @@ -2,15 +2,16 @@ package main import ( "context" + "sync/atomic" + "github.com/google/uuid" "github.com/mitchellh/go-homedir" "golang.org/x/xerrors" - "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/build" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type worker struct { @@ -18,6 +19,8 @@ type worker struct { localStore *stores.Local ls stores.LocalStorage + + disabled int64 } func (w *worker) Version(context.Context) (build.Version, error) { @@ -43,4 +46,34 @@ func (w *worker) StorageAddLocal(ctx context.Context, path string) error { return nil } -var _ storage.Sealer = &worker{} +func (w *worker) SetEnabled(ctx context.Context, enabled bool) error { + disabled := int64(1) + if enabled { + disabled = 0 + } + atomic.StoreInt64(&w.disabled, disabled) + return nil +} + +func (w *worker) Enabled(ctx context.Context) (bool, error) { + return atomic.LoadInt64(&w.disabled) == 0, nil +} + +func (w *worker) WaitQuiet(ctx context.Context) error { + w.LocalWorker.WaitQuiet() // uses WaitGroup under the hood so no ctx :/ + return nil +} + +func (w *worker) ProcessSession(ctx context.Context) (uuid.UUID, error) { + return w.LocalWorker.Session(ctx) +} + +func (w *worker) Session(ctx context.Context) (uuid.UUID, error) { + if atomic.LoadInt64(&w.disabled) == 1 { + return uuid.UUID{}, xerrors.Errorf("worker disabled") + } + + return w.LocalWorker.Session(ctx) +} + +var _ storiface.WorkerCalls = &worker{} diff --git a/cmd/lotus-seed/seed/seed.go b/cmd/lotus-seed/seed/seed.go index 5e911991d..ab8e5a52a 100644 --- a/cmd/lotus-seed/seed/seed.go +++ b/cmd/lotus-seed/seed/seed.go @@ -21,15 +21,16 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" - "github.com/filecoin-project/specs-actors/actors/builtin/market" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/genesis" ) @@ -93,7 +94,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect return nil, nil, err } } else { - minerAddr, err = wallet.GenerateKey(crypto.SigTypeBLS) + minerAddr, err = wallet.GenerateKey(types.KTBLS) if err != nil { return nil, nil, err } @@ -187,7 +188,7 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector } func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) { - paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, stores.FTSealed|stores.FTCache, stores.PathSealing) + paths, done, err := 
sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -211,7 +212,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe } func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error { - paths, done, err := sbfs.AcquireSector(context.TODO(), sid, stores.FTUnsealed, stores.FTNone, stores.PathSealing) + paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) if err != nil { return err } @@ -253,7 +254,7 @@ func WriteGenesisMiner(maddr address.Address, sbroot string, gm *genesis.Miner, func createDeals(m *genesis.Miner, k *wallet.Key, maddr address.Address, ssize abi.SectorSize) error { for i, sector := range m.Sectors { - proposal := &market.DealProposal{ + proposal := &market2.DealProposal{ PieceCID: sector.CommD, PieceSize: abi.PaddedPieceSize(ssize), Client: k.Address, diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index c2099bb2d..b12c069f5 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -5,6 +5,10 @@ import ( "fmt" "strconv" + "github.com/filecoin-project/lotus/chain/gen/genesis" + + _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/docker/go-units" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" @@ -136,6 +140,9 @@ var chainBalanceStateCmd = &cli.Command{ &cli.BoolFlag{ Name: "miner-info", }, + &cli.BoolFlag{ + Name: "robust-addresses", + }, }, Action: func(cctx *cli.Context) error { ctx := context.TODO() @@ -187,6 +194,33 @@ var chainBalanceStateCmd = &cli.Command{ minerInfo := cctx.Bool("miner-info") + robustMap := make(map[address.Address]address.Address) + if cctx.Bool("robust-addresses") { + iact, err := tree.GetActor(_init.Address) + if err != nil { + return xerrors.Errorf("failed to load init actor: %w", err) + } + + ist, err := _init.Load(store, iact) + if err != nil { + return xerrors.Errorf("failed to load init actor state: %w", err) + } + + err = ist.ForEachActor(func(id abi.ActorID, addr address.Address) error { + idAddr, err := address.NewIDAddress(uint64(id)) + if err != nil { + return xerrors.Errorf("failed to write to addr map: %w", err) + } + + robustMap[idAddr] = addr + + return nil + }) + if err != nil { + return xerrors.Errorf("failed to invert init address map: %w", err) + } + } + var infos []accountInfo err = tree.ForEach(func(addr address.Address, act *types.Actor) error { @@ -201,6 +235,23 @@ var chainBalanceStateCmd = &cli.Command{ VestingAmount: types.FIL(big.NewInt(0)), } + if cctx.Bool("robust-addresses") { + robust, found := robustMap[addr] + if found { + ai.Address = robust + } else { + id, err := address.IDFromAddress(addr) + if err != nil { + return xerrors.Errorf("failed to get ID address: %w", err) + } + + // TODO: This is not the correctest way to determine whether a robust address should exist + if id >= genesis.MinerStart { + return xerrors.Errorf("address doesn't have a robust address: %s", addr) + } + } + } + if minerInfo && builtin.IsStorageMinerActor(act.Code) { pow, _, _, err := stmgr.GetPowerRaw(ctx, sm, sroot, addr) if err != nil { @@ -372,7 +423,7 @@ var chainPledgeCmd = &cli.Command{ pledgeCollateral = c } - circ, err := sm.GetCirculatingSupplyDetailed(ctx, abi.ChainEpoch(epoch), state) + circ, err := sm.GetVMCirculatingSupplyDetailed(ctx, 
abi.ChainEpoch(epoch), state) if err != nil { return err } } diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index c6bac6815..83422e77b 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -1,17 +1,22 @@ package main import ( + "bufio" "encoding/json" "fmt" + "io" "os" "strings" "github.com/docker/go-units" "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" + badgerds "github.com/ipfs/go-ds-badger2" logging "github.com/ipfs/go-log" + "github.com/mitchellh/go-homedir" "github.com/polydawn/refmt/cbor" "github.com/urfave/cli/v2" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/lib/backupds" @@ -25,6 +30,7 @@ var datastoreCmd = &cli.Command{ datastoreBackupCmd, datastoreListCmd, datastoreGetCmd, + datastoreRewriteCmd, }, } @@ -288,3 +294,65 @@ func printVal(enc string, val []byte) error { return nil } + +var datastoreRewriteCmd = &cli.Command{ + Name: "rewrite", + Description: "rewrites badger datastore to compact it and possibly change params", + ArgsUsage: "source destination", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return xerrors.Errorf("expected 2 arguments, got %d", cctx.NArg()) + } + fromPath, err := homedir.Expand(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("cannot get fromPath: %w", err) + } + toPath, err := homedir.Expand(cctx.Args().Get(1)) + if err != nil { + return xerrors.Errorf("cannot get toPath: %w", err) + } + + opts := repo.ChainBadgerOptions() + opts.Options = opts.Options.WithSyncWrites(false) + to, err := badgerds.NewDatastore(toPath, &opts) + if err != nil { + return xerrors.Errorf("opening 'to' datastore: %w", err) + } + + opts.Options = opts.Options.WithReadOnly(false) + from, err := badgerds.NewDatastore(fromPath, &opts) + if err != nil { + return xerrors.Errorf("opening 'from' datastore: %w", err) + } + + pr, pw := io.Pipe() + errCh := make(chan error) + go func() { + bw := bufio.NewWriterSize(pw, 64<<20) + _, err := from.DB.Backup(bw, 0) + _ = bw.Flush() + _ = pw.CloseWithError(err) + errCh <- err + }() + go func() { + err := to.DB.Load(pr, 256) + errCh <- err + }() + + err = <-errCh + if err != nil { + select { + case nerr := <-errCh: + err = multierr.Append(err, nerr) + default: + } + return err + } + + err = <-errCh + if err != nil { + return err + } + return multierr.Append(from.Close(), to.Close()) + }, +} diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go new file mode 100644 index 000000000..ffe30d163 --- /dev/null +++ b/cmd/lotus-shed/election.go @@ -0,0 +1,70 @@ +package main + +import ( + "encoding/binary" + "fmt" + "math/rand" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var electionCmd = &cli.Command{ + Name: "election", + Usage: "commands related to leader election", + Subcommands: []*cli.Command{ + electionRunDummy, + }, +} + +var electionRunDummy = &cli.Command{ + Name: "run-dummy", + Usage: "runs dummy elections with given power", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "network-power", + }, + &cli.StringFlag{ + Name: "miner-power", + }, + &cli.Uint64Flag{ + Name: "seed", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + minerPow, err := types.BigFromString(cctx.String("miner-power")) + if err != nil { + return xerrors.Errorf("decoding miner-power: %w", err) + } + networkPow, err :=
types.BigFromString(cctx.String("network-power")) + if err != nil { + return xerrors.Errorf("decoding network-power: %w", err) + } + + ep := &types.ElectionProof{} + ep.VRFProof = make([]byte, 32) + seed := cctx.Uint64("seed") + if seed == 0 { + seed = rand.Uint64() + } + binary.BigEndian.PutUint64(ep.VRFProof, seed) + + i := uint64(0) + for { + if ctx.Err() != nil { + return ctx.Err() + } + binary.BigEndian.PutUint64(ep.VRFProof[8:], i) + j := ep.ComputeWinCount(minerPow, networkPow) + _, err := fmt.Printf("%t, %d\n", j != 0, j) + if err != nil { + return err + } + i++ + } + }, +} diff --git a/cmd/lotus-shed/frozen-miners.go b/cmd/lotus-shed/frozen-miners.go new file mode 100644 index 000000000..6b843f0d6 --- /dev/null +++ b/cmd/lotus-shed/frozen-miners.go @@ -0,0 +1,85 @@ +package main + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var frozenMinersCmd = &cli.Command{ + Name: "frozen-miners", + Description: "information about miner actors with late or frozen deadline crons", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset state to search on (pass comma separated array of cids)", + }, + &cli.BoolFlag{ + Name: "future", + Usage: "print info of miners with last deadline cron in the future (normal for v0 and early v2 actors)", + }, + }, + Action: func(c *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(c) + if err != nil { + return err + } + defer acloser() + ctx := lcli.ReqContext(c) + + ts, err := lcli.LoadTipSet(ctx, c, api) + if err != nil { + return err + } + if ts == nil { + ts, err = api.ChainHead(ctx) + if err != nil { + return err + } + } + + queryEpoch := ts.Height() + + mAddrs, err := api.StateListMiners(ctx, ts.Key()) + if err != nil { + return err + } + + for _, mAddr := range mAddrs { + st, err := api.StateReadState(ctx, mAddr, ts.Key()) + if err != nil { + return err + } + minerState, ok := st.State.(map[string]interface{}) + if !ok { + return xerrors.Errorf("internal error: failed to cast miner state to expected map type") + } + + ppsIface := minerState["ProvingPeriodStart"] + pps := int64(ppsIface.(float64)) + dlIdxIface := minerState["CurrentDeadline"] + dlIdx := uint64(dlIdxIface.(float64)) + latestDeadline := abi.ChainEpoch(pps) + abi.ChainEpoch(int64(dlIdx))*miner.WPoStChallengeWindow + nextDeadline := latestDeadline + miner.WPoStChallengeWindow + + // Need +1 because last epoch of the deadline queryEpoch = x + 59 cron gets run and + // state is left with latestDeadline = x + 60 + if c.Bool("future") && latestDeadline > queryEpoch+1 { + fmt.Printf("%s -- last deadline start in future epoch %d > query epoch %d + 1\n", mAddr, latestDeadline, queryEpoch) + } + + // Equality is an error because last epoch of the deadline queryEpoch = x + 59.
Cron + // should get run and bump latestDeadline = x + 60 so nextDeadline = x + 120 + if queryEpoch >= nextDeadline { + fmt.Printf("%s -- next deadline start in non-future epoch %d <= query epoch %d\n", mAddr, nextDeadline, queryEpoch) + } + + } + + return nil + }, +} diff --git a/cmd/lotus-shed/keyinfo.go b/cmd/lotus-shed/keyinfo.go index fdd1fcb49..4dcd10cbf 100644 --- a/cmd/lotus-shed/keyinfo.go +++ b/cmd/lotus-shed/keyinfo.go @@ -32,10 +32,10 @@ import ( _ "github.com/filecoin-project/lotus/lib/sigs/secp" ) -var validTypes = []string{wallet.KTBLS, wallet.KTSecp256k1, lp2p.KTLibp2pHost} +var validTypes = []types.KeyType{types.KTBLS, types.KTSecp256k1, lp2p.KTLibp2pHost} type keyInfoOutput struct { - Type string + Type types.KeyType Address string PublicKey string } @@ -86,7 +86,7 @@ var keyinfoVerifyCmd = &cli.Command{ return xerrors.Errorf("decoding key: '%s': %w", fileName, err) } - if string(name) != keyInfo.Type { + if types.KeyType(name) != keyInfo.Type { return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type) } case modules.KTJwtHmacSecret: @@ -98,7 +98,7 @@ var keyinfoVerifyCmd = &cli.Command{ if string(name) != modules.JWTSecretName { return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type) } - case wallet.KTSecp256k1, wallet.KTBLS: + case types.KTSecp256k1, types.KTBLS: keystore := wallet.NewMemKeyStore() w, err := wallet.NewWallet(keystore) if err != nil { @@ -214,7 +214,7 @@ var keyinfoImportCmd = &cli.Command{ fmt.Printf("%s\n", peerid.String()) break - case wallet.KTSecp256k1, wallet.KTBLS: + case types.KTSecp256k1, types.KTBLS: w, err := wallet.NewWallet(keystore) if err != nil { return err @@ -317,7 +317,7 @@ var keyinfoInfoCmd = &cli.Command{ kio.PublicKey = base64.StdEncoding.EncodeToString(pkBytes) break - case wallet.KTSecp256k1, wallet.KTBLS: + case types.KTSecp256k1, types.KTBLS: kio.Type = keyInfo.Type key, err := wallet.NewKey(keyInfo) @@ -366,7 +366,7 @@ var keyinfoNewCmd = &cli.Command{ return fmt.Errorf("please specify a type to generate") } - keyType := cctx.Args().First() + keyType := types.KeyType(cctx.Args().First()) flagOutput := cctx.String("output") if i := SliceIndex(len(validTypes), func(i int) bool { @@ -404,8 +404,8 @@ var keyinfoNewCmd = &cli.Command{ keyInfo = ki break - case wallet.KTSecp256k1, wallet.KTBLS: - key, err := wallet.GenerateKey(wallet.ActSigType(keyType)) + case types.KTSecp256k1, types.KTBLS: + key, err := wallet.GenerateKey(keyType) if err != nil { return err } @@ -418,7 +418,7 @@ var keyinfoNewCmd = &cli.Command{ filename := flagOutput filename = strings.ReplaceAll(filename, "", keyAddr) - filename = strings.ReplaceAll(filename, "", keyType) + filename = strings.ReplaceAll(filename, "", string(keyType)) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { diff --git a/cmd/lotus-shed/ledger.go b/cmd/lotus-shed/ledger.go new file mode 100644 index 000000000..ecb13ec64 --- /dev/null +++ b/cmd/lotus-shed/ledger.go @@ -0,0 +1,255 @@ +package main + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/urfave/cli/v2" + ledgerfil "github.com/whyrusleeping/ledger-filecoin-go" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" + lcli "github.com/filecoin-project/lotus/cli" +) + +var ledgerCmd = &cli.Command{ + Name: "ledger", + Usage: "Ledger 
interactions", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + ledgerListAddressesCmd, + ledgerKeyInfoCmd, + ledgerSignTestCmd, + }, +} + +const hdHard = 0x80000000 + +var ledgerListAddressesCmd = &cli.Command{ + Name: "list", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "print-balances", + Usage: "print balances", + Aliases: []string{"b"}, + }, + }, + Action: func(cctx *cli.Context) error { + var api api.FullNode + if cctx.Bool("print-balances") { + a, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + api = a + + defer closer() + } + ctx := lcli.ReqContext(cctx) + + fl, err := ledgerfil.FindLedgerFilecoinApp() + if err != nil { + return err + } + + end := 20 + for i := 0; i < end; i++ { + if err := ctx.Err(); err != nil { + return err + } + + p := []uint32{hdHard | 44, hdHard | 461, hdHard, 0, uint32(i)} + pubk, err := fl.GetPublicKeySECP256K1(p) + if err != nil { + return err + } + + addr, err := address.NewSecp256k1Address(pubk) + if err != nil { + return err + } + + if cctx.Bool("print-balances") && api != nil { // api check makes linter happier + a, err := api.StateGetActor(ctx, addr, types.EmptyTSK) + if err != nil { + if strings.Contains(err.Error(), "actor not found") { + a = nil + } else { + return err + } + } + + balance := big.Zero() + if a != nil { + balance = a.Balance + end = i + 20 + 1 + } + + fmt.Printf("%s %s %s\n", addr, printHDPath(p), types.FIL(balance)) + } else { + fmt.Printf("%s %s\n", addr, printHDPath(p)) + } + + } + + return nil + }, +} + +func parseHDPath(s string) ([]uint32, error) { + parts := strings.Split(s, "/") + if parts[0] != "m" { + return nil, fmt.Errorf("expected HD path to start with 'm'") + } + + var out []uint32 + for _, p := range parts[1:] { + var hard bool + if strings.HasSuffix(p, "'") { + p = p[:len(p)-1] + hard = true + } + + v, err := strconv.ParseUint(p, 10, 32) + if err != nil { + return nil, err + } + if v >= hdHard { + return nil, fmt.Errorf("path element %s too large", p) + } + + if hard { + v += hdHard + } + out = append(out, uint32(v)) + } + return out, nil +} + +func printHDPath(pth []uint32) string { + s := "m" + for _, p := range pth { + s += "/" + + hard := p&hdHard != 0 + p &^= hdHard // remove hdHard bit + + s += fmt.Sprint(p) + if hard { + s += "'" + } + } + + return s +} + +var ledgerKeyInfoCmd = &cli.Command{ + Name: "key-info", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + + fl, err := ledgerfil.FindLedgerFilecoinApp() + if err != nil { + return err + } + + p, err := parseHDPath(cctx.Args().First()) + if err != nil { + return err + } + + pubk, _, addr, err := fl.GetAddressPubKeySECP256K1(p) + if err != nil { + return err + } + + if cctx.Bool("verbose") { + fmt.Println(addr) + fmt.Println(pubk) + } + + a, err := address.NewFromString(addr) + if err != nil { + return err + } + + var pd ledgerwallet.LedgerKeyInfo + pd.Address = a + pd.Path = p + + b, err := json.Marshal(pd) + if err != nil { + return err + } + + var ki types.KeyInfo + ki.Type = types.KTSecp256k1Ledger + ki.PrivateKey = b + + out, err := json.Marshal(ki) + if err != nil { + return err + } + + fmt.Println(string(out)) + + return nil + }, +} + +var ledgerSignTestCmd = &cli.Command{ + Name: "sign", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + + fl, err := 
ledgerfil.FindLedgerFilecoinApp() + if err != nil { + return err + } + + p, err := parseHDPath(cctx.Args().First()) + if err != nil { + return err + } + + addr, err := address.NewFromString("f1xc3hws5n6y5m3m44gzb3gyjzhups6wzmhe663ji") + if err != nil { + return err + } + + m := &types.Message{ + To: addr, + From: addr, + } + + b, err := m.ToStorageBlock() + if err != nil { + return err + } + + sig, err := fl.SignSECP256K1(p, b.RawData()) + if err != nil { + return err + } + + fmt.Println(sig.SignatureBytes()) + + return nil + }, +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 4542551db..eef357596 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "os" logging "github.com/ipfs/go-log/v2" @@ -18,6 +19,7 @@ func main() { base32Cmd, base16Cmd, bitFieldCmd, + frozenMinersCmd, keyinfoCmd, jwtCmd, noncefix, @@ -41,6 +43,10 @@ func main() { syncCmd, stateTreePruneCmd, datastoreCmd, + ledgerCmd, + sectorsCmd, + msgCmd, + electionCmd, } app := &cli.App{ @@ -55,6 +61,13 @@ func main() { Hidden: true, Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME }, + &cli.StringFlag{ + Name: "miner-repo", + Aliases: []string{"storagerepo"}, + EnvVars: []string{"LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH"}, + Value: "~/.lotusminer", // TODO: Consider XDG_DATA_HOME + Usage: fmt.Sprintf("Specify miner repo path. The storagerepo flag and the LOTUS_STORAGE_PATH env var are DEPRECATED and will be REMOVED SOON"), + }, &cli.StringFlag{ Name: "log-level", Value: "info", diff --git a/cmd/lotus-shed/mempool-stats.go b/cmd/lotus-shed/mempool-stats.go index d70cd4b71..bc4a801f0 100644 --- a/cmd/lotus-shed/mempool-stats.go +++ b/cmd/lotus-shed/mempool-stats.go @@ -14,11 +14,10 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/tag" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/go-address" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" ) @@ -145,7 +144,7 @@ var mpoolStatsCmd = &cli.Command{ seen: time.Now(), } - if u.Message.Message.Method == builtin0.MethodsMiner.SubmitWindowedPoSt { + if u.Message.Message.Method == miner.Methods.SubmitWindowedPoSt { miner, err := isMiner(u.Message.Message.To) if err != nil { diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go new file mode 100644 index 000000000..63cfc86b9 --- /dev/null +++ b/cmd/lotus-shed/msg.go @@ -0,0 +1,280 @@ +package main + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fatih/color" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" +) + +var msgCmd = &cli.Command{ + Name: "msg", + Usage: "Translate message between various formats", + ArgsUsage: "Message in any form", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + msg, err := messageFromString(cctx, cctx.Args().First()) + if err != nil { + return err + } + + switch msg := msg.(type) { + case
*types.SignedMessage: + return printSignedMessage(cctx, msg) + case *types.Message: + return printMessage(cctx, msg) + default: + return xerrors.Errorf("this error message can't be printed") + } + }, +} + +func printSignedMessage(cctx *cli.Context, smsg *types.SignedMessage) error { + color.Green("Signed:") + color.Blue("CID: %s\n", smsg.Cid()) + + b, err := smsg.Serialize() + if err != nil { + return err + } + color.Magenta("HEX: %x\n", b) + color.Blue("B64: %s\n", base64.StdEncoding.EncodeToString(b)) + jm, err := json.MarshalIndent(smsg, "", " ") + if err != nil { + return xerrors.Errorf("marshaling as json: %w", err) + } + + color.Magenta("JSON: %s\n", string(jm)) + fmt.Println() + fmt.Println("---") + color.Green("Signed Message Details:") + fmt.Printf("Signature(hex): %x\n", smsg.Signature.Data) + fmt.Printf("Signature(b64): %s\n", base64.StdEncoding.EncodeToString(smsg.Signature.Data)) + + sigtype, err := smsg.Signature.Type.Name() + if err != nil { + sigtype = err.Error() + } + fmt.Printf("Signature type: %d (%s)\n", smsg.Signature.Type, sigtype) + + fmt.Println("-------") + return printMessage(cctx, &smsg.Message) +} + +func printMessage(cctx *cli.Context, msg *types.Message) error { + if msg.Version != 0x6d736967 { + color.Green("Unsigned:") + color.Yellow("CID: %s\n", msg.Cid()) + + b, err := msg.Serialize() + if err != nil { + return err + } + color.Cyan("HEX: %x\n", b) + color.Yellow("B64: %s\n", base64.StdEncoding.EncodeToString(b)) + + jm, err := json.MarshalIndent(msg, "", " ") + if err != nil { + return xerrors.Errorf("marshaling as json: %w", err) + } + + color.Cyan("JSON: %s\n", string(jm)) + fmt.Println() + } else { + color.Green("Msig Propose:") + pp := &multisig.ProposeParams{ + To: msg.To, + Value: msg.Value, + Method: msg.Method, + Params: msg.Params, + } + var b bytes.Buffer + if err := pp.MarshalCBOR(&b); err != nil { + return err + } + + color.Cyan("HEX: %x\n", b.Bytes()) + color.Yellow("B64: %s\n", base64.StdEncoding.EncodeToString(b.Bytes())) + jm, err := json.MarshalIndent(pp, "", " ") + if err != nil { + return xerrors.Errorf("marshaling as json: %w", err) + } + + color.Cyan("JSON: %s\n", string(jm)) + fmt.Println() + } + + fmt.Println("---") + color.Green("Message Details:") + fmt.Println("Value:", types.FIL(msg.Value)) + fmt.Println("Max Fees:", types.FIL(msg.RequiredFunds())) + fmt.Println("Max Total Cost:", types.FIL(big.Add(msg.RequiredFunds(), msg.Value))) + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + toact, err := api.StateGetActor(ctx, msg.To, types.EmptyTSK) + if err != nil { + return nil + } + + fmt.Println("Method:", stmgr.MethodsMap[toact.Code][msg.Method].Name) + p, err := lcli.JsonParams(toact.Code, msg.Method, msg.Params) + if err != nil { + return err + } + + fmt.Println("Params:", p) + + return nil +} + +func messageFromString(cctx *cli.Context, smsg string) (types.ChainMsg, error) { + // a CID is least likely to just decode + if c, err := cid.Parse(smsg); err == nil { + return messageFromCID(cctx, c) + } + + // try baseX serializations next + { + // hex first, some hay strings may be decodable as b64 + if b, err := hex.DecodeString(smsg); err == nil { + return messageFromBytes(cctx, b) + } + + // b64 next + if b, err := base64.StdEncoding.DecodeString(smsg); err == nil { + return messageFromBytes(cctx, b) + } + + // b64u?? 
+ if b, err := base64.URLEncoding.DecodeString(smsg); err == nil { + return messageFromBytes(cctx, b) + } + } + + // maybe it's json? + if msg, err := messageFromJson(cctx, []byte(smsg)); err == nil { + return msg, nil + } + + // declare defeat + return nil, xerrors.Errorf("couldn't decode the message") +} + +func messageFromJson(cctx *cli.Context, msgb []byte) (types.ChainMsg, error) { + // Unsigned + { + var msg types.Message + if err := json.Unmarshal(msgb, &msg); err == nil { + if msg.To != address.Undef { + return &msg, nil + } + } + } + + // Signed + { + var msg types.SignedMessage + if err := json.Unmarshal(msgb, &msg); err == nil { + if msg.Message.To != address.Undef { + return &msg, nil + } + } + } + + return nil, xerrors.New("probably not a json-serialized message") +} + +func messageFromBytes(cctx *cli.Context, msgb []byte) (types.ChainMsg, error) { + // Signed + { + var msg types.SignedMessage + if err := msg.UnmarshalCBOR(bytes.NewReader(msgb)); err == nil { + return &msg, nil + } + } + + // Unsigned + { + var msg types.Message + if err := msg.UnmarshalCBOR(bytes.NewReader(msgb)); err == nil { + return &msg, nil + } + } + + // Multisig propose? + { + var pp multisig.ProposeParams + if err := pp.UnmarshalCBOR(bytes.NewReader(msgb)); err == nil { + i, err := address.NewIDAddress(0) + if err != nil { + return nil, err + } + + return &types.Message{ + // Hack(-ish) + Version: 0x6d736967, + From: i, + + To: pp.To, + Value: pp.Value, + + Method: pp.Method, + Params: pp.Params, + + GasFeeCap: big.Zero(), + GasPremium: big.Zero(), + }, nil + } + } + + // Encoded json??? + { + if msg, err := messageFromJson(cctx, msgb); err == nil { + return msg, nil + } + } + + return nil, xerrors.New("probably not a cbor-serialized message") +} + +func messageFromCID(cctx *cli.Context, c cid.Cid) (types.ChainMsg, error) { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return nil, err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + msgb, err := api.ChainReadObj(ctx, c) + if err != nil { + return nil, err + } + + return messageFromBytes(cctx, msgb) +} diff --git a/cmd/lotus-shed/proofs.go b/cmd/lotus-shed/proofs.go index 2379d8599..e75aeed14 100644 --- a/cmd/lotus-shed/proofs.go +++ b/cmd/lotus-shed/proofs.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "fmt" - saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/urfave/cli/v2" @@ -84,7 +84,7 @@ var verifySealProofCmd = &cli.Command{ snum := abi.SectorNumber(cctx.Uint64("sector-id")) - ok, err := ffi.VerifySeal(saproof.SealVerifyInfo{ + ok, err := ffi.VerifySeal(proof2.SealVerifyInfo{ SectorID: abi.SectorID{ Miner: abi.ActorID(mid), Number: snum, diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go new file mode 100644 index 000000000..2e78469fa --- /dev/null +++ b/cmd/lotus-shed/sectors.go @@ -0,0 +1,133 @@ +package main + +import ( + "fmt" + "strconv" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/urfave/cli/v2" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var sectorsCmd = &cli.Command{ + Name: "sectors", + Usage:
"Tools for interacting with sectors", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + terminateSectorCmd, + }, +} + +var terminateSectorCmd = &cli.Command{ + Name: "terminate", + Usage: "Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector)", + ArgsUsage: "[sectorNum1 sectorNum2 ...]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag if you know what you are doing", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 1 { + return fmt.Errorf("at least one sector must be specified") + } + + if !cctx.Bool("really-do-it") { + return fmt.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing") + } + + nodeApi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := api.ActorAddress(ctx) + if err != nil { + return err + } + + mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + terminationDeclarationParams := []miner2.TerminationDeclaration{} + + for _, sn := range cctx.Args().Slice() { + sectorNum, err := strconv.ParseUint(sn, 10, 64) + if err != nil { + return fmt.Errorf("could not parse sector number: %w", err) + } + + sectorbit := bitfield.New() + sectorbit.Set(sectorNum) + + loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK) + if err != nil { + return fmt.Errorf("get state sector partition %s", err) + } + + para := miner2.TerminationDeclaration{ + Deadline: loca.Deadline, + Partition: loca.Partition, + Sectors: sectorbit, + } + + terminationDeclarationParams = append(terminationDeclarationParams, para) + } + + terminateSectorParams := &miner2.TerminateSectorsParams{ + Terminations: terminationDeclarationParams, + } + + sp, err := actors.SerializeParams(terminateSectorParams) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeApi.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.TerminateSectors, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push message: %w", err) + } + + fmt.Println("sent termination message:", smsg.Cid()) + + wait, err := nodeApi.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence"))) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("terminate sectors message returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go index bfe7cc8b7..65d2b6d6f 100644 --- a/cmd/lotus-shed/sync.go +++ b/cmd/lotus-shed/sync.go @@ -2,6 +2,15 @@ package main import ( "fmt" + "strconv" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -16,6 +25,7 @@ var syncCmd = &cli.Command{ Flags: []cli.Flag{}, Subcommands: []*cli.Command{ syncValidateCmd, + syncScrapePowerCmd, }, } @@ -62,3 +72,113 @@ var syncValidateCmd = &cli.Command{ return nil }, } + +var syncScrapePowerCmd = &cli.Command{ + Name: "scrape-power", + Usage: "given a height and a tipset, reports what 
percentage of mining power had a winning ticket between the tipset and height", + ArgsUsage: "[height tipsetkey]", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 1 { + fmt.Println("usage: [blockCid1 blockCid2...]") + fmt.Println("Any CIDs passed after the height will be used as the tipset key") + fmt.Println("If no block CIDs are provided, chain head will be used") + return nil + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Args().Len() < 1 { + fmt.Println("usage: ...") + fmt.Println("At least one block cid must be provided") + return nil + } + + h, err := strconv.ParseInt(cctx.Args().Get(0), 10, 0) + if err != nil { + return err + } + + height := abi.ChainEpoch(h) + + var ts *types.TipSet + var startTsk types.TipSetKey + if cctx.NArg() > 1 { + var tscids []cid.Cid + args := cctx.Args().Slice() + + for _, s := range args[1:] { + c, err := cid.Decode(s) + if err != nil { + return fmt.Errorf("block cid was invalid: %s", err) + } + tscids = append(tscids, c) + } + + startTsk = types.NewTipSetKey(tscids...) + ts, err = api.ChainGetTipSet(ctx, startTsk) + if err != nil { + return err + } + } else { + ts, err = api.ChainHead(ctx) + if err != nil { + return err + } + + startTsk = ts.Key() + } + + if ts.Height() < height { + return fmt.Errorf("start tipset's height < stop height: %d < %d", ts.Height(), height) + } + + miners := make(map[address.Address]struct{}) + for ts.Height() >= height { + for _, blk := range ts.Blocks() { + _, found := miners[blk.Miner] + if !found { + // do the thing + miners[blk.Miner] = struct{}{} + } + } + + ts, err = api.ChainGetTipSet(ctx, ts.Parents()) + if err != nil { + return err + } + } + + totalWonPower := power.Claim{ + RawBytePower: big.Zero(), + QualityAdjPower: big.Zero(), + } + for miner := range miners { + mp, err := api.StateMinerPower(ctx, miner, startTsk) + if err != nil { + return err + } + + totalWonPower = power.AddClaims(totalWonPower, mp.MinerPower) + } + + totalPower, err := api.StateMinerPower(ctx, address.Undef, startTsk) + if err != nil { + return err + } + + qpercI := types.BigDiv(types.BigMul(totalWonPower.QualityAdjPower, types.NewInt(1000000)), totalPower.TotalPower.QualityAdjPower) + + fmt.Println("Number of winning miners: ", len(miners)) + fmt.Println("QAdjPower of winning miners: ", totalWonPower.QualityAdjPower) + fmt.Println("QAdjPower of all miners: ", totalPower.TotalPower.QualityAdjPower) + fmt.Println("Percentage of winning QAdjPower: ", float64(qpercI.Int64())/10000) + + return nil + }, +} diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go index 860498302..df1f0d990 100644 --- a/cmd/lotus-shed/verifreg.go +++ b/cmd/lotus-shed/verifreg.go @@ -11,8 +11,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/build" @@ -63,7 +62,7 @@ var verifRegAddVerifierCmd = &cli.Command{ } // TODO: ActorUpgrade: Abstract - params, err := actors.SerializeParams(&verifreg0.AddVerifierParams{Address: verifier, Allowance: allowance}) + params, err := actors.SerializeParams(&verifreg2.AddVerifierParams{Address: verifier, Allowance: allowance}) if err != nil 
{ return err } @@ -80,7 +79,7 @@ var verifRegAddVerifierCmd = &cli.Command{ return err } - smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(builtin0.MethodsVerifiedRegistry.AddVerifier), params) + smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params) if err != nil { return err } @@ -136,7 +135,7 @@ var verifRegVerifyClientCmd = &cli.Command{ return err } - params, err := actors.SerializeParams(&verifreg0.AddVerifiedClientParams{Address: target, Allowance: allowance}) + params, err := actors.SerializeParams(&verifreg2.AddVerifiedClientParams{Address: target, Allowance: allowance}) if err != nil { return err } @@ -151,7 +150,7 @@ var verifRegVerifyClientCmd = &cli.Command{ msg := &types.Message{ To: verifreg.Address, From: fromk, - Method: builtin0.MethodsVerifiedRegistry.AddVerifiedClient, + Method: verifreg.Methods.AddVerifiedClient, Params: params, } diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go index fa320289e..69486eaf5 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-storage-miner/actor.go @@ -5,8 +5,7 @@ import ( "os" "strings" - "github.com/filecoin-project/lotus/build" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/fatih/color" "github.com/libp2p/go-libp2p-core/peer" @@ -18,10 +17,13 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/tablewriter" @@ -34,9 +36,12 @@ var actorCmd = &cli.Command{ Subcommands: []*cli.Command{ actorSetAddrsCmd, actorWithdrawCmd, + actorRepayDebtCmd, actorSetPeeridCmd, actorSetOwnerCmd, actorControl, + actorProposeChangeWorker, + actorConfirmChangeWorker, }, } @@ -92,7 +97,7 @@ var actorSetAddrsCmd = &cli.Command{ return err } - params, err := actors.SerializeParams(&miner0.ChangeMultiaddrsParams{NewMultiaddrs: addrs}) + params, err := actors.SerializeParams(&miner2.ChangeMultiaddrsParams{NewMultiaddrs: addrs}) if err != nil { return err } @@ -104,7 +109,7 @@ var actorSetAddrsCmd = &cli.Command{ From: minfo.Worker, Value: types.NewInt(0), GasLimit: gasLimit, - Method: builtin.MethodsMiner.ChangeMultiaddrs, + Method: miner.Methods.ChangeMultiaddrs, Params: params, }, nil) if err != nil { @@ -157,7 +162,7 @@ var actorSetPeeridCmd = &cli.Command{ return err } - params, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(pid)}) + params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(pid)}) if err != nil { return err } @@ -169,7 +174,7 @@ var actorSetPeeridCmd = &cli.Command{ From: minfo.Worker, Value: types.NewInt(0), GasLimit: gasLimit, - Method: builtin.MethodsMiner.ChangePeerID, + Method: miner.Methods.ChangePeerID, Params: params, }, nil) if err != nil { @@ -230,7 +235,7 @@ var actorWithdrawCmd = &cli.Command{ } } - params, err 
:= actors.SerializeParams(&miner0.WithdrawBalanceParams{ + params, err := actors.SerializeParams(&miner2.WithdrawBalanceParams{ AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor }) if err != nil { @@ -241,7 +246,7 @@ var actorWithdrawCmd = &cli.Command{ To: maddr, From: mi.Owner, Value: types.NewInt(0), - Method: builtin.MethodsMiner.WithdrawBalance, + Method: miner.Methods.WithdrawBalance, Params: params, }, nil) if err != nil { @@ -254,6 +259,105 @@ var actorWithdrawCmd = &cli.Command{ }, } +var actorRepayDebtCmd = &cli.Command{ + Name: "repay-debt", + Usage: "pay down a miner's debt", + ArgsUsage: "[amount (FIL)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send funds from", + }, + }, + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + var amount abi.TokenAmount + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + } else { + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))) + + mst, err := miner.Load(store, mact) + if err != nil { + return err + } + + amount, err = mst.FeeDebt() + if err != nil { + return err + } + + } + + fromAddr := mi.Worker + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK) + if err != nil { + return err + } + + if !mi.IsController(fromId) { + return xerrors.Errorf("sender isn't a controller of miner: %s", fromId) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: fromId, + Value: amount, + Method: miner.Methods.RepayDebt, + Params: nil, + }, nil) + if err != nil { + return err + } + + fmt.Printf("Sent repay debt message %s\n", smsg.Cid()) + + return nil + }, +} + var actorControl = &cli.Command{ Name: "control", Usage: "Manage control addresses", @@ -455,7 +559,7 @@ var actorControlSet = &cli.Command{ return nil } - cwp := &miner0.ChangeWorkerAddressParams{ + cwp := &miner2.ChangeWorkerAddressParams{ NewWorker: mi.Worker, NewControlAddrs: toSet, } @@ -468,7 +572,7 @@ var actorControlSet = &cli.Command{ smsg, err := api.MpoolPushMessage(ctx, &types.Message{ From: mi.Owner, To: maddr, - Method: builtin.MethodsMiner.ChangeWorkerAddress, + Method: miner.Methods.ChangeWorkerAddress, Value: big.Zero(), Params: sp, @@ -546,7 +650,7 @@ var actorSetOwnerCmd = &cli.Command{ smsg, err := api.MpoolPushMessage(ctx, &types.Message{ From: mi.Owner, To: maddr, - Method: builtin2.MethodsMiner.ChangeOwnerAddress, + Method: miner.Methods.ChangeOwnerAddress, Value: big.Zero(), Params: sp, }, nil) @@ -571,7 +675,7 @@ var actorSetOwnerCmd = &cli.Command{ smsg, err = api.MpoolPushMessage(ctx, &types.Message{ From: newAddr, To: maddr, - Method: builtin2.MethodsMiner.ChangeOwnerAddress, + Method: 
miner.Methods.ChangeOwnerAddress, Value: big.Zero(), Params: sp, }, nil) @@ -596,3 +700,221 @@ var actorSetOwnerCmd = &cli.Command{ return nil }, } + +var actorProposeChangeWorker = &cli.Command{ + Name: "propose-change-worker", + Usage: "Propose a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ChangeWorkerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!") + return err + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.NewWorker != newAddr { + return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) + } + + fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + + return nil + }, +} + +var actorConfirmChangeWorker = &cli.Command{ + Name: "confirm-change-worker", + Usage: "Confirm a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return 
err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + return xerrors.Errorf("no worker key change proposed") + } else if mi.NewWorker != newAddr { + return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker) + } + + if head, err := api.ChainHead(ctx); err != nil { + return xerrors.Errorf("failed to get the chain head: %w", err) + } else if head.Height() < mi.WorkerChangeEpoch { + return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ConfirmUpdateWorkerKey, + Value: big.Zero(), + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + return err + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.Worker != newAddr { + return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) + } + + return nil + }, +} diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go new file mode 100644 index 000000000..949171699 --- /dev/null +++ b/cmd/lotus-storage-miner/actor_test.go @@ -0,0 +1,164 @@ +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "regexp" + "strconv" + "sync/atomic" + "testing" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/node/repo" + builder "github.com/filecoin-project/lotus/node/test" +) + +func TestWorkerKeyChange(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _ = logging.SetLogLevel("*", "INFO") + + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + lotuslog.SetupLogLevels() + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("pubsub", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", 
"ERROR") + + blocktime := 1 * time.Millisecond + + n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner) + + client1 := n[0] + client2 := n[1] + + // Connect the nodes. + addrinfo, err := client1.NetAddrsListen(ctx) + require.NoError(t, err) + err = client2.NetConnect(ctx, addrinfo) + require.NoError(t, err) + + output := bytes.NewBuffer(nil) + run := func(cmd *cli.Command, args ...string) error { + app := cli.NewApp() + app.Metadata = map[string]interface{}{ + "repoType": repo.StorageMiner, + "testnode-full": n[0], + "testnode-storage": sn[0], + } + app.Writer = output + build.RunningNodeType = build.NodeMiner + + fs := flag.NewFlagSet("", flag.ContinueOnError) + for _, f := range cmd.Flags { + if err := f.Apply(fs); err != nil { + return err + } + } + require.NoError(t, fs.Parse(args)) + + cctx := cli.NewContext(app, fs, nil) + return cmd.Action(cctx) + } + + // setup miner + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) == 1 { + time.Sleep(blocktime) + if err := sn[0].MineOne(ctx, test.MineNext); err != nil { + t.Error(err) + } + } + }() + defer func() { + atomic.AddInt64(&mine, -1) + fmt.Println("shutting down mining") + <-done + }() + + newKey, err := client1.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // Initialize wallet. + test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) + + require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String())) + + result := output.String() + output.Reset() + + require.Contains(t, result, fmt.Sprintf("Worker key change to %s successfully proposed.", newKey)) + + epochRe := regexp.MustCompile("at or after height (?P[0-9]+) to complete") + matches := epochRe.FindStringSubmatch(result) + require.NotNil(t, matches) + targetEpoch, err := strconv.Atoi(matches[1]) + require.NoError(t, err) + require.NotZero(t, targetEpoch) + + // Too early. + require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) + output.Reset() + + for { + head, err := client1.ChainHead(ctx) + require.NoError(t, err) + if head.Height() >= abi.ChainEpoch(targetEpoch) { + break + } + build.Clock.Sleep(10 * blocktime) + } + require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) + output.Reset() + + head, err := client1.ChainHead(ctx) + require.NoError(t, err) + + // Wait for finality (worker key switch). + targetHeight := head.Height() + policy.ChainFinality + for { + head, err := client1.ChainHead(ctx) + require.NoError(t, err) + if head.Height() >= targetHeight { + break + } + build.Clock.Sleep(10 * blocktime) + } + + // Make sure the other node can catch up. 
+ for i := 0; i < 20; i++ { + head, err := client2.ChainHead(ctx) + require.NoError(t, err) + if head.Height() >= targetHeight { + return + } + build.Clock.Sleep(10 * blocktime) + } + t.Fatal("failed to reach target epoch on the second miner") +} diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 213d62e6e..b16b4dde8 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -59,6 +59,24 @@ func infoCmdAct(cctx *cli.Context) error { ctx := lcli.ReqContext(cctx) + fmt.Print("Full node: ") + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + switch { + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs + fmt.Printf("[%s]", color.GreenString("sync ok")) + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs + fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))) + default: + fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))) + } + + fmt.Println() + maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) if err != nil { return err diff --git a/cmd/lotus-storage-miner/info_all.go b/cmd/lotus-storage-miner/info_all.go index 517553028..408f9b5c7 100644 --- a/cmd/lotus-storage-miner/info_all.go +++ b/cmd/lotus-storage-miner/info_all.go @@ -137,7 +137,7 @@ var infoAllCmd = &cli.Command{ } if err := sectorsStatusCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Printf("\n##: Sector %d Storage Location\n", s) @@ -148,7 +148,7 @@ var infoAllCmd = &cli.Command{ } if err := storageFindCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { - return err + fmt.Println("ERROR: ", err) } } diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index 0f830023f..a7fcd722a 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -12,11 +12,10 @@ import ( "path/filepath" "strconv" - "github.com/filecoin-project/go-state-types/big" - "github.com/docker/go-units" "github.com/google/uuid" "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" "github.com/mitchellh/go-homedir" @@ -27,26 +26,29 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" - crypto2 "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statestore" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" 
lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/miner" + storageminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" @@ -83,7 +85,7 @@ var initCmd = &cli.Command{ &cli.StringFlag{ Name: "sector-size", Usage: "specify sector size to use", - Value: units.BytesSize(float64(build.DefaultSectorSize())), + Value: units.BytesSize(float64(policy.GetDefaultSectorSize())), }, &cli.StringSliceFlag{ Name: "pre-sealed-sectors", @@ -156,7 +158,7 @@ var initCmd = &cli.Command{ log.Info("Checking full node sync status") if !cctx.Bool("genesis-miner") && !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, api); err != nil { + if err := lcli.SyncWait(ctx, api, false); err != nil { return xerrors.Errorf("sync wait: %w", err) } } @@ -376,7 +378,7 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string, return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) } -func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market0.DealProposal) (abi.DealID, error) { +func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market2.DealProposal) (abi.DealID, error) { // TODO: find a better way // (this is only used by genesis miners) @@ -446,6 +448,9 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, return err } + wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix)) + smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix)) + smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{ SealProofType: spt, }, sectorstorage.SealerConfig{ @@ -455,7 +460,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, AllowPreCommit2: true, AllowCommit: true, AllowUnseal: true, - }, nil, sa) + }, nil, sa, wsts, smsts) if err != nil { return err } @@ -469,7 +474,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, return fmt.Errorf("failed to open filesystem journal: %w", err) } - m := miner.NewMiner(api, epp, a, slashfilter.New(mds), j) + m := storageminer.NewMiner(api, epp, a, slashfilter.New(mds), j) { if err := m.Start(ctx); err != nil { return xerrors.Errorf("failed to start up genesis miner: %w", err) @@ -569,7 +574,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address. return xerrors.Errorf("getWorkerAddr returned bad address: %w", err) } - enc, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) + enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) if err != nil { return err } @@ -577,7 +582,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address. 
msg := &types.Message{ To: addr, From: mi.Worker, - Method: builtin0.MethodsMiner.ChangePeerID, + Method: miner.Methods.ChangePeerID, Params: enc, Value: types.NewInt(0), GasPremium: gasPrice, @@ -602,8 +607,6 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address. } func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) { - log.Info("Creating StorageMarket.CreateStorageMiner message") - var err error var owner address.Address if cctx.String("owner") != "" { @@ -624,11 +627,34 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, if cctx.String("worker") != "" { worker, err = address.NewFromString(cctx.String("worker")) } else if cctx.Bool("create-worker-key") { // TODO: Do we need to force this if owner is Secpk? - worker, err = api.WalletNew(ctx, crypto2.SigTypeBLS) + worker, err = api.WalletNew(ctx, types.KTBLS) } - // TODO: Transfer some initial funds to worker if err != nil { - return address.Undef, err + return address.Address{}, err + } + + // make sure the worker account exists on chain + _, err = api.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := api.MpoolPushMessage(ctx, &types.Message{ + From: owner, + To: worker, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push worker init: %w", err) + } + + log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for worker init: %w", err) + } + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } } spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize)) @@ -636,7 +662,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, return address.Undef, err } - params, err := actors.SerializeParams(&power0.CreateMinerParams{ + params, err := actors.SerializeParams(&power2.CreateMinerParams{ Owner: owner, Worker: worker, SealProofType: spt, @@ -656,11 +682,11 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, } createStorageMinerMsg := &types.Message{ - To: builtin0.StoragePowerActorAddr, + To: power.Address, From: sender, Value: big.Zero(), - Method: builtin0.MethodsPower.CreateMiner, + Method: power.Methods.CreateMiner, Params: params, GasLimit: 0, @@ -669,22 +695,22 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, signed, err := api.MpoolPushMessage(ctx, createStorageMinerMsg, nil) if err != nil { - return address.Undef, err + return address.Undef, xerrors.Errorf("pushing createMiner message: %w", err) } - log.Infof("Pushed StorageMarket.CreateStorageMiner, %s to Mpool", signed.Cid()) + log.Infof("Pushed CreateMiner message: %s", signed.Cid()) log.Infof("Waiting for confirmation") mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) if err != nil { - return address.Undef, err + return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err) } if mw.Receipt.ExitCode != 0 { return address.Undef, xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) } - var retval power0.CreateMinerReturn + var retval power2.CreateMinerReturn if err := 
retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { return address.Undef, err } diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index bdbb99fe0..83a9ad87c 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -72,7 +72,7 @@ var initRestoreCmd = &cli.Command{ } if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, api); err != nil { + if err := lcli.SyncWait(ctx, api, false); err != nil { return xerrors.Errorf("sync wait: %w", err) } } diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go index bb1ebd9ec..be4a529e9 100644 --- a/cmd/lotus-storage-miner/market.go +++ b/cmd/lotus-storage-miner/market.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "errors" "fmt" "io" "os" @@ -13,8 +14,10 @@ import ( tm "github.com/buger/goterm" "github.com/docker/go-units" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" + "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multibase" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -569,6 +572,128 @@ var dataTransfersCmd = &cli.Command{ Usage: "Manage data transfers", Subcommands: []*cli.Command{ transfersListCmd, + marketRestartTransfer, + marketCancelTransfer, + }, +} + +var marketRestartTransfer = &cli.Command{ + Name: "restart", + Usage: "Force restart a stalled data transfer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "peerid", + Usage: "narrow to transfer with specific peer", + }, + &cli.BoolFlag{ + Name: "initiator", + Usage: "specify only transfers where peer is/is not initiator", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) + if err != nil { + return fmt.Errorf("Error reading transfer ID: %w", err) + } + transferID := datatransfer.TransferID(transferUint) + initiator := cctx.Bool("initiator") + var other peer.ID + if pidstr := cctx.String("peerid"); pidstr != "" { + p, err := peer.Decode(pidstr) + if err != nil { + return err + } + other = p + } else { + channels, err := nodeApi.MarketListDataTransfers(ctx) + if err != nil { + return err + } + found := false + for _, channel := range channels { + if channel.IsInitiator == initiator && channel.TransferID == transferID { + other = channel.OtherPeer + found = true + break + } + } + if !found { + return errors.New("unable to find matching data transfer") + } + } + + return nodeApi.MarketRestartDataTransfer(ctx, transferID, other, initiator) + }, +} + +var marketCancelTransfer = &cli.Command{ + Name: "cancel", + Usage: "Force cancel a data transfer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "peerid", + Usage: "narrow to transfer with specific peer", + }, + &cli.BoolFlag{ + Name: "initiator", + Usage: "specify only transfers where peer is/is not initiator", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) + if err != nil { + 
return fmt.Errorf("Error reading transfer ID: %w", err) + } + transferID := datatransfer.TransferID(transferUint) + initiator := cctx.Bool("initiator") + var other peer.ID + if pidstr := cctx.String("peerid"); pidstr != "" { + p, err := peer.Decode(pidstr) + if err != nil { + return err + } + other = p + } else { + channels, err := nodeApi.MarketListDataTransfers(ctx) + if err != nil { + return err + } + found := false + for _, channel := range channels { + if channel.IsInitiator == initiator && channel.TransferID == transferID { + other = channel.OtherPeer + found = true + break + } + } + if !found { + return errors.New("unable to find matching data transfer") + } + } + + return nodeApi.MarketCancelDataTransfer(ctx, transferID, other, initiator) }, } @@ -589,6 +714,10 @@ var transfersListCmd = &cli.Command{ Name: "watch", Usage: "watch deal updates in real-time, rather than a one time list", }, + &cli.BoolFlag{ + Name: "show-failed", + Usage: "show failed/cancelled transfers", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -606,7 +735,7 @@ var transfersListCmd = &cli.Command{ completed := cctx.Bool("completed") color := cctx.Bool("color") watch := cctx.Bool("watch") - + showFailed := cctx.Bool("show-failed") if watch { channelUpdates, err := api.MarketDataTransferUpdates(ctx) if err != nil { @@ -618,7 +747,7 @@ var transfersListCmd = &cli.Command{ tm.MoveCursor(1, 1) - lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color) + lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) tm.Flush() @@ -643,7 +772,7 @@ var transfersListCmd = &cli.Command{ } } } - lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color) + lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) return nil }, } diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go index 98a9cfaba..0c2fba8b3 100644 --- a/cmd/lotus-storage-miner/run.go +++ b/cmd/lotus-storage-miner/run.go @@ -2,6 +2,7 @@ package main import ( "context" + "net" "net/http" _ "net/http/pprof" "os" @@ -12,6 +13,8 @@ import ( "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" "github.com/urfave/cli/v2" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc" @@ -22,6 +25,7 @@ import ( "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/ulimit" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -66,6 +70,13 @@ var runCmd = &cli.Command{ defer ncloser() ctx := lcli.DaemonContext(cctx) + // Register all metric views + if err := view.Register( + metrics.DefaultViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + v, err := nodeApi.Version(ctx) if err != nil { return err @@ -84,7 +95,7 @@ var runCmd = &cli.Command{ log.Info("Checking full node sync status") if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, nodeApi); err != nil { + if err := lcli.SyncWait(ctx, nodeApi, false); err != nil { return xerrors.Errorf("sync wait: %w", err) } } @@ -147,7 +158,7 @@ var runCmd = &cli.Command{ mux := mux.NewRouter() rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", apistruct.PermissionedStorMinerAPI(minerapi)) + rpcServer.Register("Filecoin", 
apistruct.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi))) mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote) @@ -158,7 +169,13 @@ var runCmd = &cli.Command{ Next: mux.ServeHTTP, } - srv := &http.Server{Handler: ah} + srv := &http.Server{ + Handler: ah, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner")) + return ctx + }, + } sigChan := make(chan os.Signal, 2) go func() { diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-storage-miner/sealing.go index 5cc5c419a..49003fc26 100644 --- a/cmd/lotus-storage-miner/sealing.go +++ b/cmd/lotus-storage-miner/sealing.go @@ -1,6 +1,7 @@ package main import ( + "encoding/hex" "encoding/json" "fmt" "os" @@ -9,10 +10,10 @@ import ( "text/tabwriter" "time" - "golang.org/x/xerrors" - "github.com/fatih/color" + "github.com/google/uuid" "github.com/urfave/cli/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -53,7 +54,7 @@ var sealingWorkersCmd = &cli.Command{ } type sortableStat struct { - id uint64 + id uuid.UUID storiface.WorkerStats } @@ -63,7 +64,7 @@ var sealingWorkersCmd = &cli.Command{ } sort.Slice(st, func(i, j int) bool { - return st[i].id < st[j].id + return st[i].id.String() < st[j].id.String() }) for _, stat := range st { @@ -74,13 +75,19 @@ var sealingWorkersCmd = &cli.Command{ gpuUse = "" } - fmt.Printf("Worker %d, host %s\n", stat.id, color.MagentaString(stat.Info.Hostname)) + var disabled string + if !stat.Enabled { + disabled = color.RedString(" (disabled)") + } + + fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled) var barCols = uint64(64) cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs) cpuBar := strings.Repeat("|", cpuBars) + strings.Repeat(" ", int(barCols)-cpuBars) - fmt.Printf("\tCPU: [%s] %d core(s) in use\n", color.GreenString(cpuBar), stat.CpuUse) + fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n", + color.GreenString(cpuBar), stat.CpuUse, stat.Info.Resources.CPUs) ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical) ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical) @@ -139,7 +146,7 @@ var sealingJobsCmd = &cli.Command{ type line struct { storiface.WorkerJob - wid uint64 + wid uuid.UUID } lines := make([]line, 0) @@ -158,10 +165,13 @@ var sealingJobsCmd = &cli.Command{ if lines[i].RunWait != lines[j].RunWait { return lines[i].RunWait < lines[j].RunWait } + if lines[i].Start.Equal(lines[j].Start) { + return lines[i].ID.ID.String() < lines[j].ID.ID.String() + } return lines[i].Start.Before(lines[j].Start) }) - workerHostnames := map[uint64]string{} + workerHostnames := map[uuid.UUID]string{} wst, err := nodeApi.WorkerStats(ctx) if err != nil { @@ -177,10 +187,25 @@ var sealingJobsCmd = &cli.Command{ for _, l := range lines { state := "running" - if l.RunWait != 0 { + if l.RunWait > 0 { state = fmt.Sprintf("assigned(%d)", l.RunWait-1) } - _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\t%s\t%s\t%s\n", l.ID, l.Sector.Number, l.wid, workerHostnames[l.wid], l.Task.Short(), state, time.Now().Sub(l.Start).Truncate(time.Millisecond*100)) + if l.RunWait == -1 { + state = "ret-wait" + } + dur := "n/a" + if !l.Start.IsZero() { + dur = time.Now().Sub(l.Start).Truncate(time.Millisecond * 100).String() + } + + _, _ = fmt.Fprintf(tw, "%s\t%d\t%s\t%s\t%s\t%s\t%s\n", + 
hex.EncodeToString(l.ID.ID[10:]), + l.Sector.Number, + hex.EncodeToString(l.wid[5:]), + workerHostnames[l.wid], + l.Task.Short(), + state, + dur) } return tw.Flush() @@ -190,6 +215,11 @@ var sealingJobsCmd = &cli.Command{ var sealingSchedDiagCmd = &cli.Command{ Name: "sched-diag", Usage: "Dump internal scheduler state", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "force-sched", + }, + }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -199,7 +229,7 @@ var sealingSchedDiagCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - st, err := nodeApi.SealingSchedDiag(ctx) + st, err := nodeApi.SealingSchedDiag(ctx, cctx.Bool("force-sched")) if err != nil { return err } diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index b50f4a86d..e2e94cf69 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -5,6 +5,7 @@ import ( "os" "sort" "strconv" + "strings" "time" "github.com/docker/go-units" @@ -15,10 +16,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/tablewriter" @@ -156,6 +156,14 @@ var sectorsListCmd = &cli.Command{ Name: "fast", Usage: "don't show on-chain info for better performance", }, + &cli.BoolFlag{ + Name: "events", + Usage: "display number of events the sector has received", + }, + &cli.BoolFlag{ + Name: "seal-time", + Usage: "display how long it took for the sector to be sealed", + }, }, Action: func(cctx *cli.Context) error { color.NoColor = !cctx.Bool("color") @@ -217,10 +225,12 @@ var sectorsListCmd = &cli.Command{ tablewriter.Col("OnChain"), tablewriter.Col("Active"), tablewriter.Col("Expiration"), + tablewriter.Col("SealTime"), + tablewriter.Col("Events"), tablewriter.Col("Deals"), tablewriter.Col("DealWeight"), tablewriter.NewLineCol("Error"), - tablewriter.NewLineCol("EarlyExpiration")) + tablewriter.NewLineCol("RecoveryTimeout")) fast := cctx.Bool("fast") @@ -282,7 +292,53 @@ var sectorsListCmd = &cli.Command{ } if st.Early > 0 { - m["EarlyExpiration"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early)) + m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early)) + } + } + } + + if cctx.Bool("events") { + var events int + for _, sectorLog := range st.Log { + if !strings.HasPrefix(sectorLog.Kind, "event") { + continue + } + if sectorLog.Kind == "event;sealing.SectorRestart" { + continue + } + events++ + } + + pieces := len(st.Deals) + + switch { + case events < 12+pieces: + m["Events"] = color.GreenString("%d", events) + case events < 20+pieces: + m["Events"] = color.YellowString("%d", events) + default: + m["Events"] = color.RedString("%d", events) + } + } + + if cctx.Bool("seal-time") && len(st.Log) > 1 { + start := time.Unix(int64(st.Log[0].Timestamp), 0) + + for _, sectorLog := range st.Log { + if sectorLog.Kind == "event;sealing.SectorProving" { + end := time.Unix(int64(sectorLog.Timestamp), 0) + dur := end.Sub(start) + + switch { + case dur < 12*time.Hour: + m["SealTime"] = color.GreenString("%s", dur) + case dur < 24*time.Hour: + m["SealTime"] = color.YellowString("%s", dur) + default: + m["SealTime"] = color.RedString("%s", 
dur) + } + + break } } } @@ -463,7 +519,7 @@ var sectorsCapacityCollateralCmd = &cli.Command{ Expiration: abi.ChainEpoch(cctx.Uint64("expiration")), } if pci.Expiration == 0 { - pci.Expiration = miner0.MaxSectorExpirationExtension + pci.Expiration = policy.GetMaxSectorExpirationExtension() } pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK) if err != nil { diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go index 77792f32a..8b960a4bf 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-storage-miner/storage.go @@ -19,11 +19,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) const metaFile = "sectorstore.json" @@ -317,17 +318,17 @@ var storageFindCmd = &cli.Command{ Number: abi.SectorNumber(snum), } - u, err := nodeApi.StorageFindSector(ctx, sid, stores.FTUnsealed, 0, false) + u, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUnsealed, 0, false) if err != nil { return xerrors.Errorf("finding unsealed: %w", err) } - s, err := nodeApi.StorageFindSector(ctx, sid, stores.FTSealed, 0, false) + s, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTSealed, 0, false) if err != nil { return xerrors.Errorf("finding sealed: %w", err) } - c, err := nodeApi.StorageFindSector(ctx, sid, stores.FTCache, 0, false) + c, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTCache, 0, false) if err != nil { return xerrors.Errorf("finding cache: %w", err) } diff --git a/cmd/lotus-wallet/logged.go b/cmd/lotus-wallet/logged.go index 3bcb3f867..272a8d10b 100644 --- a/cmd/lotus-wallet/logged.go +++ b/cmd/lotus-wallet/logged.go @@ -19,13 +19,8 @@ type LoggedWallet struct { under api.WalletAPI } -func (c *LoggedWallet) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { - n, err := typ.Name() - if err != nil { - return address.Address{}, err - } - - log.Infow("WalletNew", "type", n) +func (c *LoggedWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { + log.Infow("WalletNew", "type", typ) return c.under.WalletNew(ctx, typ) } diff --git a/cmd/lotus-wallet/main.go b/cmd/lotus-wallet/main.go index ae3580a59..25b89eb9d 100644 --- a/cmd/lotus-wallet/main.go +++ b/cmd/lotus-wallet/main.go @@ -9,12 +9,18 @@ import ( "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/wallet" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/repo" ) @@ -60,6 +66,10 @@ var runCmd = &cli.Command{ Usage: "host address and port the wallet api will listen on", Value: "0.0.0.0:1777", }, + &cli.BoolFlag{ + Name: "ledger", + Usage: "use a ledger device instead of an on-disk 
wallet", + }, }, Action: func(cctx *cli.Context) error { log.Info("Starting lotus wallet") @@ -68,6 +78,13 @@ var runCmd = &cli.Command{ ctx, cancel := context.WithCancel(ctx) defer cancel() + // Register all metric views + if err := view.Register( + metrics.DefaultViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + repoPath := cctx.String(FlagWalletRepo) r, err := repo.NewFS(repoPath) if err != nil { @@ -94,18 +111,31 @@ var runCmd = &cli.Command{ return err } - w, err := wallet.NewWallet(ks) + lw, err := wallet.NewWallet(ks) if err != nil { return err } + var w api.WalletAPI = lw + if cctx.Bool("ledger") { + ds, err := lr.Datastore("/metadata") + if err != nil { + return err + } + + w = wallet.MultiWallet{ + Local: lw, + Ledger: ledgerwallet.NewWallet(ds), + } + } + address := cctx.String("listen") mux := mux.NewRouter() log.Info("Setting up API endpoint at " + address) rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", &LoggedWallet{under: w}) + rpcServer.Register("Filecoin", &LoggedWallet{under: metrics.MetricedWalletAPI(w)}) mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof @@ -118,6 +148,7 @@ var runCmd = &cli.Command{ srv := &http.Server{ Handler: mux, BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-wallet")) return ctx }, } diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go index 9718deb3a..4f68ac85a 100644 --- a/cmd/lotus/rpc.go +++ b/cmd/lotus/rpc.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/json" + "net" "net/http" _ "net/http/pprof" "os" @@ -13,6 +14,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + "go.opencensus.io/tag" "golang.org/x/xerrors" "contrib.go.opencensus.io/exporter/prometheus" @@ -22,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/impl" ) @@ -30,7 +33,7 @@ var log = logging.Logger("main") func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}) error { rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(a)) + rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(metrics.MetricedFullAPI(a))) ah := &auth.Handler{ Verify: a.AuthVerify, @@ -60,7 +63,13 @@ func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shut return xerrors.Errorf("could not listen: %w", err) } - srv := &http.Server{Handler: http.DefaultServeMux} + srv := &http.Server{ + Handler: http.DefaultServeMux, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon")) + return ctx + }, + } sigCh := make(chan os.Signal, 2) shutdownDone := make(chan struct{}) diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go new file mode 100644 index 000000000..b9f590914 --- /dev/null +++ b/cmd/tvx/codenames.go @@ -0,0 +1,38 @@ +package main + +import ( + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/build" +) + +// ProtocolCodenames is a table that summarises the protocol codenames that +// will be set on extracted vectors, depending on the original execution height. 
+// +// Implementers rely on these names to filter the vectors they can run through +// their implementations, based on their support level +var ProtocolCodenames = []struct { + firstEpoch abi.ChainEpoch + name string +}{ + {0, "genesis"}, + {build.UpgradeBreezeHeight + 1, "breeze"}, + {build.UpgradeSmokeHeight + 1, "smoke"}, + {build.UpgradeIgnitionHeight + 1, "ignition"}, + {build.UpgradeRefuelHeight + 1, "refuel"}, + {build.UpgradeActorsV2Height + 1, "actorsv2"}, + {build.UpgradeTapeHeight + 1, "tape"}, + {build.UpgradeLiftoffHeight + 1, "liftoff"}, + {build.UpgradeKumquatHeight + 1, "postliftoff"}, +} + +// GetProtocolCodename gets the protocol codename associated with a height. +func GetProtocolCodename(height abi.ChainEpoch) string { + for i, v := range ProtocolCodenames { + if height < v.firstEpoch { + // found the cutoff, return previous. + return ProtocolCodenames[i-1].name + } + } + return ProtocolCodenames[len(ProtocolCodenames)-1].name +} diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go new file mode 100644 index 000000000..00d107707 --- /dev/null +++ b/cmd/tvx/codenames_test.go @@ -0,0 +1,28 @@ +package main + +import ( + "math" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/build" +) + +func TestProtocolCodenames(t *testing.T) { + if height := abi.ChainEpoch(100); GetProtocolCodename(height) != "genesis" { + t.Fatal("expected genesis codename") + } + + if height := abi.ChainEpoch(build.UpgradeBreezeHeight + 1); GetProtocolCodename(height) != "breeze" { + t.Fatal("expected breeze codename") + } + + if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(height) != "actorsv2" { + t.Fatal("expected actorsv2 codename") + } + + if height := abi.ChainEpoch(math.MaxInt64); GetProtocolCodename(height) != ProtocolCodenames[len(ProtocolCodenames)-1].name { + t.Fatal("expected last codename") + } +} diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index 9ec6f9e2b..89ad23913 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -72,20 +72,24 @@ func runExecLotus(_ *cli.Context) error { func executeTestVector(tv schema.TestVector) error { log.Println("executing test vector:", tv.Meta.ID) - r := new(conformance.LogReporter) - switch class := tv.Class; class { - case "message": - conformance.ExecuteMessageVector(r, &tv) - case "tipset": - conformance.ExecuteTipsetVector(r, &tv) - default: - return fmt.Errorf("test vector class %s not supported", class) - } - if r.Failed() { - log.Println(color.HiRedString("❌ test vector failed")) - } else { - log.Println(color.GreenString("✅ test vector succeeded")) + for _, v := range tv.Pre.Variants { + r := new(conformance.LogReporter) + + switch class, v := tv.Class, v; class { + case "message": + conformance.ExecuteMessageVector(r, &tv, &v) + case "tipset": + conformance.ExecuteTipsetVector(r, &tv, &v) + default: + return fmt.Errorf("test vector class %s not supported", class) + } + + if r.Failed() { + log.Println(color.HiRedString("❌ test vector failed for variant %s", v.ID)) + } else { + log.Println(color.GreenString("✅ test vector succeeded for variant %s", v.ID)) + } } return nil diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go index b0ed574df..3dfec37d8 100644 --- a/cmd/tvx/extract.go +++ b/cmd/tvx/extract.go @@ -14,15 +14,13 @@ import ( "github.com/fatih/color" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" 
"github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/test-vectors/schema" "github.com/ipfs/go-cid" @@ -35,13 +33,14 @@ const ( ) type extractOpts struct { - id string - block string - class string - cid string - file string - retain string - precursor string + id string + block string + class string + cid string + file string + retain string + precursor string + ignoreSanityChecks bool } var extractFlags extractOpts @@ -50,6 +49,8 @@ var extractCmd = &cli.Command{ Name: "extract", Description: "generate a test vector by extracting it from a live chain", Action: runExtract, + Before: initialize, + After: destroy, Flags: []cli.Flag{ &repoFlag, &cli.StringFlag{ @@ -89,53 +90,42 @@ var extractCmd = &cli.Command{ }, &cli.StringFlag{ Name: "precursor-select", - Usage: "precursors to apply; values: 'all', 'sender'; 'all' selects all preceding" + - "messages in the canonicalised tipset, 'sender' selects only preceding messages from the same" + - "sender. Usually, 'sender' is a good tradeoff and gives you sufficient accuracy. If the receipt sanity" + - "check fails due to gas reasons, switch to 'all', as previous messages in the tipset may have" + + Usage: "precursors to apply; values: 'all', 'sender'; 'all' selects all preceding " + + "messages in the canonicalised tipset, 'sender' selects only preceding messages from the same " + + "sender. Usually, 'sender' is a good tradeoff and gives you sufficient accuracy. If the receipt sanity " + + "check fails due to gas reasons, switch to 'all', as previous messages in the tipset may have " + "affected state in a disruptive way", Value: "sender", Destination: &extractFlags.precursor, }, + &cli.BoolFlag{ + Name: "ignore-sanity-checks", + Usage: "generate vector even if sanity checks fail", + Value: false, + Destination: &extractFlags.ignoreSanityChecks, + }, }, } -func runExtract(c *cli.Context) error { - // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering", - // which stashes write operations in a BufferedBlockstore - // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21) - // such that they're not written until the VM is actually flushed. - // - // For some reason, the standard behaviour was not working for me (raulk), - // and disabling it (such that the state transformations are written immediately - // to the blockstore) worked. - _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea") - - ctx := context.Background() - - // Make the API client. 
- fapi, closer, err := lcli.GetFullNodeAPI(c) - if err != nil { - return err - } - defer closer() - - return doExtract(ctx, fapi, extractFlags) +func runExtract(_ *cli.Context) error { + return doExtract(extractFlags) } -func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { +func doExtract(opts extractOpts) error { + ctx := context.Background() + mcid, err := cid.Decode(opts.cid) if err != nil { return err } - msg, execTs, incTs, err := resolveFromChain(ctx, fapi, mcid, opts.block) + msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block) if err != nil { return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) } // get the circulating supply before the message was executed. - circSupplyDetail, err := fapi.StateCirculatingSupply(ctx, incTs.Key()) + circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) if err != nil { return fmt.Errorf("failed while fetching circulating supply: %w", err) } @@ -148,7 +138,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { log.Printf("finding precursor messages using mode: %s", opts.precursor) // Fetch messages in canonical order from inclusion tipset. - msgs, err := fapi.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) + msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) if err != nil { return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) } @@ -175,8 +165,8 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { var ( // create a read-through store that uses ChainGetObject to fetch unknown CIDs. - pst = NewProxyingStores(ctx, fapi) - g = NewSurgeon(ctx, fapi, pst) + pst = NewProxyingStores(ctx, FullAPI) + g = NewSurgeon(ctx, FullAPI, pst) ) driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ @@ -201,7 +191,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { CircSupply: circSupplyDetail.FilCirculating, BaseFee: basefee, // recorded randomness will be discarded. - Rand: conformance.NewRecordingRand(new(conformance.LogReporter), fapi), + Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), }) if err != nil { return fmt.Errorf("failed to execute precursor message: %w", err) @@ -216,7 +206,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { retention = opts.retain // recordingRand will record randomness so we can embed it in the test vector. - recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), fapi) + recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) ) log.Printf("using state retention strategy: %s", retention) @@ -249,7 +239,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { case "accessed-actors": log.Printf("calculating accessed actors") // get actors accessed by message. - retain, err := g.GetAccessedActors(ctx, fapi, mcid) + retain, err := g.GetAccessedActors(ctx, FullAPI, mcid) if err != nil { return fmt.Errorf("failed to calculate accessed actors: %w", err) } @@ -287,7 +277,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯ // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2 // This code is lenient and skips receipt comparison in case of a nil receipt. 
- rec, err := fapi.StateGetReceipt(ctx, mcid, execTs.Key()) + rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key()) if err != nil { return fmt.Errorf("failed to find receipt on chain: %w", err) } @@ -301,13 +291,20 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { ReturnValue: rec.Return, GasUsed: rec.GasUsed, } + reporter := new(conformance.LogReporter) conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed") if reporter.Failed() { - log.Println(color.RedString("receipt sanity check failed; aborting")) - return fmt.Errorf("vector generation aborted") + if opts.ignoreSanityChecks { + log.Println(color.YellowString("receipt sanity check failed; proceeding anyway")) + } else { + log.Println(color.RedString("receipt sanity check failed; aborting")) + return fmt.Errorf("vector generation aborted") + } + } else { + log.Println(color.GreenString("receipt sanity check succeeded")) } - log.Println(color.GreenString("receipt sanity check succeeded")) + } else { receipt = &schema.Receipt{ ExitCode: int64(applyret.ExitCode), @@ -337,16 +334,23 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { return err } - version, err := fapi.Version(ctx) + version, err := FullAPI.Version(ctx) if err != nil { return err } - ntwkName, err := fapi.StateNetworkName(ctx) + ntwkName, err := FullAPI.StateNetworkName(ctx) if err != nil { return err } + nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) + if err != nil { + return err + } + + codename := GetProtocolCodename(execTs.Height()) + // Write out the test vector. vector := schema.TestVector{ Class: schema.ClassMessage, @@ -363,10 +367,15 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())}, {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, Randomness: recordingRand.Recorded(), CAR: out.Bytes(), Pre: &schema.Preconditions{ - Epoch: int64(execTs.Height()), + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)}, + }, CircSupply: circSupply.Int, BaseFee: basefee.Int, StateTree: &schema.StateTree{ @@ -388,8 +397,12 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { }, } + return writeVector(vector, opts.file) +} + +func writeVector(vector schema.TestVector, file string) (err error) { output := io.WriteCloser(os.Stdout) - if file := opts.file; file != "" { + if file := file; file != "" { dir := filepath.Dir(file) if err := os.MkdirAll(dir, 0755); err != nil { return fmt.Errorf("unable to create directory %s: %w", dir, err) @@ -404,11 +417,7 @@ func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error { enc := json.NewEncoder(output) enc.SetIndent("", " ") - if err := enc.Encode(&vector); err != nil { - return err - } - - return nil + return enc.Encode(&vector) } // resolveFromChain queries the chain for the provided message, using the block CID to diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go index fe0ce6a6c..048271456 100644 --- a/cmd/tvx/extract_many.go +++ b/cmd/tvx/extract_many.go @@ -1,7 +1,6 @@ package main import ( - "context" "encoding/csv" "fmt" "io" @@ -20,7 +19,6 @@ import ( "github.com/urfave/cli/v2" "github.com/filecoin-project/lotus/chain/stmgr" - lcli "github.com/filecoin-project/lotus/cli" ) var extractManyFlags struct { @@ -45,6 
+43,8 @@ var extractManyCmd = &cli.Command{ after these compulsory seven. `, Action: runExtractMany, + Before: initialize, + After: destroy, Flags: []cli.Flag{ &repoFlag, &cli.StringFlag{ @@ -77,15 +77,6 @@ func runExtractMany(c *cli.Context) error { // to the blockstore) worked. _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea") - ctx := context.Background() - - // Make the API client. - fapi, closer, err := lcli.GetFullNodeAPI(c) - if err != nil { - return err - } - defer closer() - var ( in = extractManyFlags.in outdir = extractManyFlags.outdir @@ -198,8 +189,8 @@ func runExtractMany(c *cli.Context) error { precursor: PrecursorSelectSender, } - if err := doExtract(ctx, fapi, opts); err != nil { - log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'canonical' precursor selection", mcid, err)) + if err := doExtract(opts); err != nil { + log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'all' precursor selection", mcid, err)) retry = append(retry, opts) continue } @@ -215,7 +206,7 @@ func runExtractMany(c *cli.Context) error { log.Printf("retrying %s: %s", r.cid, r.id) r.precursor = PrecursorSelectAll - if err := doExtract(ctx, fapi, r); err != nil { + if err := doExtract(r); err != nil { merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err)) continue } diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go index 6c887d163..8de851ed5 100644 --- a/cmd/tvx/main.go +++ b/cmd/tvx/main.go @@ -1,13 +1,26 @@ package main import ( + "fmt" "log" "os" "sort" + "github.com/filecoin-project/go-jsonrpc" "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/api" + lcli "github.com/filecoin-project/lotus/cli" ) +// FullAPI is a JSON-RPC client targeting a full node. It's initialized in a +// cli.BeforeFunc. +var FullAPI api.FullNode + +// Closer is the closer for the JSON-RPC client, which must be called on +// cli.AfterFunc. +var Closer jsonrpc.ClientCloser + // DefaultLotusRepoPath is where the fallback path where to look for a Lotus // client repo. It is expanded with mitchellh/go-homedir, so it'll work with all // OSes despite the Unix twiddle notation. @@ -23,7 +36,7 @@ var repoFlag = cli.StringFlag{ func main() { app := &cli.App{ Name: "tvx", - Description: `tvx is a tool for extracting and executing test vectors. It has three subcommands. + Description: `tvx is a tool for extracting and executing test vectors. It has four subcommands. tvx extract extracts a test vector from a live network. It requires access to a Filecoin client that exposes the standard JSON-RPC API endpoint. Only @@ -35,6 +48,10 @@ func main() { tvx extract-many performs a batch extraction of many messages, supplied in a CSV file. Refer to the help of that subcommand for more info. + tvx simulate takes a raw message and simulates it on top of the supplied + epoch, reporting the result on stderr and writing a test vector on stdout + or into the specified file. + SETTING THE JSON-RPC API ENDPOINT You can set the JSON-RPC API endpoint through one of the following methods. 
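// Editorial sketch, not part of this patch: the new `tvx simulate --msg` flag
// described above expects a base64-encoded, CBOR-serialized message. One way to
// produce that payload from Go is sketched below; it assumes
// types.Message.Serialize() (Lotus' CBOR encoding helper), and every address,
// nonce and gas value is a placeholder chosen purely for illustration.
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	from, _ := address.NewIDAddress(1000) // placeholder sender
	to, _ := address.NewIDAddress(1234)   // placeholder recipient

	msg := &types.Message{
		To:         to,
		From:       from,
		Nonce:      7,
		Value:      abi.NewTokenAmount(0),
		GasLimit:   10000000,
		GasFeeCap:  big.NewInt(100),
		GasPremium: big.NewInt(100),
		Method:     0,
	}

	raw, err := msg.Serialize() // CBOR-encode the message
	if err != nil {
		panic(err)
	}

	// The printed string is what gets passed to `tvx simulate --msg <...>`.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}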
@@ -57,6 +74,7 @@ func main() { extractCmd, execCmd, extractManyCmd, + simulateCmd, }, } @@ -69,3 +87,29 @@ func main() { log.Fatal(err) } } + +func initialize(c *cli.Context) error { + // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering", + // which stashes write operations in a BufferedBlockstore + // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21) + // such that they're not written until the VM is actually flushed. + // + // For some reason, the standard behaviour was not working for me (raulk), + // and disabling it (such that the state transformations are written immediately + // to the blockstore) worked. + _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea") + + // Make the API client. + var err error + if FullAPI, Closer, err = lcli.GetFullNodeAPI(c); err != nil { + err = fmt.Errorf("failed to locate Lotus node; ") + } + return err +} + +func destroy(_ *cli.Context) error { + if Closer != nil { + Closer() + } + return nil +} diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go new file mode 100644 index 000000000..82b2bc118 --- /dev/null +++ b/cmd/tvx/simulate.go @@ -0,0 +1,235 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "log" + "os/exec" + + "github.com/fatih/color" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/test-vectors/schema" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/conformance" +) + +var simulateFlags struct { + msg string + epoch int64 + out string + statediff bool +} + +var simulateCmd = &cli.Command{ + Name: "simulate", + Description: "simulate a raw message on top of the supplied epoch (or HEAD), " + + "reporting the result on stderr and writing a test vector on stdout " + + "or into the specified file", + Action: runSimulateCmd, + Before: initialize, + After: destroy, + Flags: []cli.Flag{ + &repoFlag, + &cli.StringFlag{ + Name: "msg", + Usage: "base64 cbor-encoded message", + Destination: &simulateFlags.msg, + Required: true, + }, + &cli.Int64Flag{ + Name: "at-epoch", + Usage: "epoch at which to run this message (or HEAD if not provided)", + Destination: &simulateFlags.epoch, + }, + &cli.StringFlag{ + Name: "out", + Usage: "file to write the test vector to; if nil, the vector will be written to stdout", + TakesFile: true, + Destination: &simulateFlags.out, + }, + &cli.BoolFlag{ + Name: "statediff", + Usage: "display a statediff of the precondition and postcondition states", + Destination: &simulateFlags.statediff, + }, + }, +} + +func runSimulateCmd(_ *cli.Context) error { + ctx := context.Background() + r := new(conformance.LogReporter) + + msgb, err := base64.StdEncoding.DecodeString(simulateFlags.msg) + if err != nil { + return fmt.Errorf("failed to base64-decode message: %w", err) + } + + msg, err := types.DecodeMessage(msgb) + if err != nil { + return fmt.Errorf("failed to deserialize message: %w", err) + } + + log.Printf("message to simulate has CID: %s", msg.Cid()) + + msgjson, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("failed to serialize message to json for printing: %w", err) + } + + log.Printf("message to simulate: %s", string(msgjson)) + + // Resolve the tipset, root, epoch. 
+ var ts *types.TipSet + if epochIn := simulateFlags.epoch; epochIn == 0 { + ts, err = FullAPI.ChainHead(ctx) + } else { + ts, err = FullAPI.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(epochIn), types.EmptyTSK) + } + + if err != nil { + return fmt.Errorf("failed to get tipset: %w", err) + } + + var ( + preroot = ts.ParentState() + epoch = ts.Height() + baseFee = ts.Blocks()[0].ParentBaseFee + circSupply api.CirculatingSupply + ) + + // Get circulating supply. + circSupply, err = FullAPI.StateVMCirculatingSupplyInternal(ctx, ts.Key()) + if err != nil { + return fmt.Errorf("failed to get circulating supply for tipset %s: %w", ts.Key(), err) + } + + // Create the driver. + stores := NewProxyingStores(ctx, FullAPI) + driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ + DisableVMFlush: true, + }) + rand := conformance.NewRecordingRand(r, FullAPI) + + tbs, ok := stores.Blockstore.(TracingBlockstore) + if !ok { + return fmt.Errorf("no tracing blockstore available") + } + tbs.StartTracing() + applyret, postroot, err := driver.ExecuteMessage(stores.Blockstore, conformance.ExecuteMessageParams{ + Preroot: preroot, + Epoch: epoch, + Message: msg, + CircSupply: circSupply.FilCirculating, + BaseFee: baseFee, + Rand: rand, + }) + if err != nil { + return fmt.Errorf("failed to apply message: %w", err) + } + + accessed := tbs.FinishTracing() + + var ( + out = new(bytes.Buffer) + gw = gzip.NewWriter(out) + g = NewSurgeon(ctx, FullAPI, stores) + ) + if err := g.WriteCARIncluding(gw, accessed, preroot, postroot); err != nil { + return err + } + if err = gw.Flush(); err != nil { + return err + } + if err = gw.Close(); err != nil { + return err + } + + version, err := FullAPI.Version(ctx) + if err != nil { + log.Printf("failed to get node version: %s; falling back to unknown", err) + version = api.Version{} + } + + nv, err := FullAPI.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return err + } + + codename := GetProtocolCodename(epoch) + + // Write out the test vector. + vector := schema.TestVector{ + Class: schema.ClassMessage, + Meta: &schema.Metadata{ + ID: fmt.Sprintf("simulated-%s", msg.Cid()), + Gen: []schema.GenerationData{ + {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, + }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, + Randomness: rand.Recorded(), + CAR: out.Bytes(), + Pre: &schema.Preconditions{ + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(epoch), NetworkVersion: uint(nv)}, + }, + CircSupply: circSupply.FilCirculating.Int, + BaseFee: baseFee.Int, + StateTree: &schema.StateTree{ + RootCID: preroot, + }, + }, + ApplyMessages: []schema.Message{{Bytes: msgb}}, + Post: &schema.Postconditions{ + StateTree: &schema.StateTree{ + RootCID: postroot, + }, + Receipts: []*schema.Receipt{ + { + ExitCode: int64(applyret.ExitCode), + ReturnValue: applyret.Return, + GasUsed: applyret.GasUsed, + }, + }, + }, + } + + if err := writeVector(vector, simulateFlags.out); err != nil { + return fmt.Errorf("failed to write vector: %w", err) + } + + log.Printf(color.GreenString("wrote vector at: %s"), simulateFlags.out) + + if !simulateFlags.statediff { + return nil + } + + if simulateFlags.out == "" { + log.Print("omitting statediff in non-file mode") + return nil + } + + // check if statediff is installed; if not, skip. 
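// Editorial note, illustration only (not part of the patch): shelling out to
// `statediff --help` is one way to probe for the tool, but exec.LookPath
// answers "is this command on PATH?" without spawning a process, e.g.:
//
//	if _, err := exec.LookPath("statediff"); err != nil {
//		log.Printf("statediff not found in PATH; skipping statediff: %s", err)
//		return nil
//	}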
+ if err := exec.Command("statediff", "--help").Run(); err != nil { + log.Printf("could not perform statediff on generated vector; command not found (%s)", err) + log.Printf("install statediff with:") + log.Printf("$ GOMODULE111=off go get github.com/filecoin-project/statediff/cmd/statediff") + return err + } + + stdiff, err := exec.Command("statediff", "vector", "--file", simulateFlags.out).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to statediff: %w", err) + } + + log.Print(string(stdiff)) + return nil +} diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 93e0d215f..4f574c175 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -87,7 +87,7 @@ type proxyingBlockstore struct { ctx context.Context api api.FullNode - lk sync.RWMutex + lk sync.Mutex tracing bool traced map[cid.Cid]struct{} @@ -113,11 +113,11 @@ func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} { } func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { - pb.lk.RLock() + pb.lk.Lock() if pb.tracing { pb.traced[cid] = struct{}{} } - pb.lk.RUnlock() + pb.lk.Unlock() if block, err := pb.Blockstore.Get(cid); err == nil { return block, err @@ -140,3 +140,12 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return block, nil } + +func (pb *proxyingBlockstore) Put(block blocks.Block) error { + pb.lk.Lock() + if pb.tracing { + pb.traced[block.Cid()] = struct{}{} + } + pb.lk.Unlock() + return pb.Blockstore.Put(block) +} diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go index d5e0b4352..cdda1db83 100644 --- a/conformance/chaos/actor.go +++ b/conformance/chaos/actor.go @@ -6,9 +6,11 @@ import ( "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/rt" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/ipfs/go-cid" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" ) //go:generate go run ./gen @@ -104,19 +106,19 @@ type SendArgs struct { // SendReturn is the return values for the Send method. type SendReturn struct { - Return runtime.CBORBytes + Return builtin2.CBORBytes Code exitcode.ExitCode } // Send requests for this actor to send a message to an actor with the // passed parameters. -func (a Actor) Send(rt runtime.Runtime, args *SendArgs) *SendReturn { +func (a Actor) Send(rt runtime2.Runtime, args *SendArgs) *SendReturn { rt.ValidateImmediateCallerAcceptAny() - var out runtime.CBORBytes + var out builtin2.CBORBytes code := rt.Send( args.To, args.Method, - runtime.CBORBytes(args.Params), + builtin2.CBORBytes(args.Params), args.Value, &out, ) @@ -127,7 +129,7 @@ func (a Actor) Send(rt runtime.Runtime, args *SendArgs) *SendReturn { } // Constructor will panic because the Chaos actor is a singleton. -func (a Actor) Constructor(_ runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { +func (a Actor) Constructor(_ runtime2.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { panic("constructor should not be called; the Chaos actor is a singleton actor") } @@ -144,7 +146,7 @@ type CallerValidationArgs struct { // CallerValidationBranchTwice validates twice. // CallerValidationBranchIsAddress validates caller against CallerValidationArgs.Addrs. 
// CallerValidationBranchIsType validates caller against CallerValidationArgs.Types. -func (a Actor) CallerValidation(rt runtime.Runtime, args *CallerValidationArgs) *abi.EmptyValue { +func (a Actor) CallerValidation(rt runtime2.Runtime, args *CallerValidationArgs) *abi.EmptyValue { switch args.Branch { case CallerValidationBranchNone: case CallerValidationBranchTwice: @@ -174,7 +176,7 @@ type CreateActorArgs struct { } // CreateActor creates an actor with the supplied CID and Address. -func (a Actor) CreateActor(rt runtime.Runtime, args *CreateActorArgs) *abi.EmptyValue { +func (a Actor) CreateActor(rt runtime2.Runtime, args *CreateActorArgs) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() var ( @@ -199,7 +201,7 @@ type ResolveAddressResponse struct { Success bool } -func (a Actor) ResolveAddress(rt runtime.Runtime, args *address.Address) *ResolveAddressResponse { +func (a Actor) ResolveAddress(rt runtime2.Runtime, args *address.Address) *ResolveAddressResponse { rt.ValidateImmediateCallerAcceptAny() resolvedAddr, ok := rt.ResolveAddress(*args) @@ -212,7 +214,7 @@ func (a Actor) ResolveAddress(rt runtime.Runtime, args *address.Address) *Resolv // DeleteActor deletes the executing actor from the state tree, transferring any // balance to beneficiary. -func (a Actor) DeleteActor(rt runtime.Runtime, beneficiary *address.Address) *abi.EmptyValue { +func (a Actor) DeleteActor(rt runtime2.Runtime, beneficiary *address.Address) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() rt.DeleteActor(*beneficiary) return nil @@ -226,7 +228,7 @@ type MutateStateArgs struct { } // MutateState attempts to mutate a state value in the actor. -func (a Actor) MutateState(rt runtime.Runtime, args *MutateStateArgs) *abi.EmptyValue { +func (a Actor) MutateState(rt runtime2.Runtime, args *MutateStateArgs) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() var st State switch args.Branch { @@ -257,7 +259,7 @@ type AbortWithArgs struct { } // AbortWith simply causes a panic with the passed exit code. -func (a Actor) AbortWith(rt runtime.Runtime, args *AbortWithArgs) *abi.EmptyValue { +func (a Actor) AbortWith(rt runtime2.Runtime, args *AbortWithArgs) *abi.EmptyValue { if args.Uncontrolled { // uncontrolled abort: directly panic panic(args.Message) } else { @@ -277,7 +279,7 @@ type InspectRuntimeReturn struct { } // InspectRuntime returns a copy of the serializable values available in the Runtime. 
-func (a Actor) InspectRuntime(rt runtime.Runtime, _ *abi.EmptyValue) *InspectRuntimeReturn { +func (a Actor) InspectRuntime(rt runtime2.Runtime, _ *abi.EmptyValue) *InspectRuntimeReturn { rt.ValidateImmediateCallerAcceptAny() var st State rt.StateReadonly(&st) diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go index 2061efb82..dbce4f4c5 100644 --- a/conformance/chaos/actor_test.go +++ b/conformance/chaos/actor_test.go @@ -7,15 +7,16 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/support/mock" - atesting "github.com/filecoin-project/specs-actors/support/testing" "github.com/ipfs/go-cid" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + mock2 "github.com/filecoin-project/specs-actors/v2/support/mock" + atesting2 "github.com/filecoin-project/specs-actors/v2/support/testing" ) func TestSingleton(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -28,8 +29,8 @@ func TestSingleton(t *testing.T) { } func TestCallerValidationNone(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -39,19 +40,19 @@ func TestCallerValidationNone(t *testing.T) { } func TestCallerValidationIs(t *testing.T) { - caller := atesting.NewIDAddr(t, 100) - receiver := atesting.NewIDAddr(t, 101) - builder := mock.NewBuilder(context.Background(), receiver) + caller := atesting2.NewIDAddr(t, 100) + receiver := atesting2.NewIDAddr(t, 101) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) - rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.SetCaller(caller, builtin2.AccountActorCodeID) var a Actor - caddrs := []address.Address{atesting.NewIDAddr(t, 101)} + caddrs := []address.Address{atesting2.NewIDAddr(t, 101)} rt.ExpectValidateCallerAddr(caddrs...) 
- // FIXME: https://github.com/filecoin-project/specs-actors/pull/1155 - rt.ExpectAbort(exitcode.ErrForbidden, func() { + // fixed in: https://github.com/filecoin-project/specs-actors/pull/1155 + rt.ExpectAbort(exitcode.SysErrForbidden, func() { rt.Call(a.CallerValidation, &CallerValidationArgs{ Branch: CallerValidationBranchIsAddress, Addrs: caddrs, @@ -68,35 +69,34 @@ func TestCallerValidationIs(t *testing.T) { } func TestCallerValidationType(t *testing.T) { - caller := atesting.NewIDAddr(t, 100) - receiver := atesting.NewIDAddr(t, 101) - builder := mock.NewBuilder(context.Background(), receiver) + caller := atesting2.NewIDAddr(t, 100) + receiver := atesting2.NewIDAddr(t, 101) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) - rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.SetCaller(caller, builtin2.AccountActorCodeID) var a Actor - rt.ExpectValidateCallerType(builtin.CronActorCodeID) - // FIXME: https://github.com/filecoin-project/specs-actors/pull/1155 - rt.ExpectAbort(exitcode.ErrForbidden, func() { + rt.ExpectValidateCallerType(builtin2.CronActorCodeID) + rt.ExpectAbort(exitcode.SysErrForbidden, func() { rt.Call(a.CallerValidation, &CallerValidationArgs{ Branch: CallerValidationBranchIsType, - Types: []cid.Cid{builtin.CronActorCodeID}, + Types: []cid.Cid{builtin2.CronActorCodeID}, }) }) rt.Verify() - rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin2.AccountActorCodeID) rt.Call(a.CallerValidation, &CallerValidationArgs{ Branch: CallerValidationBranchIsType, - Types: []cid.Cid{builtin.AccountActorCodeID}, + Types: []cid.Cid{builtin2.AccountActorCodeID}, }) rt.Verify() } func TestCallerValidationInvalidBranch(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -108,9 +108,9 @@ func TestCallerValidationInvalidBranch(t *testing.T) { } func TestDeleteActor(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - beneficiary := atesting.NewIDAddr(t, 101) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + beneficiary := atesting2.NewIDAddr(t, 101) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -122,8 +122,8 @@ func TestDeleteActor(t *testing.T) { } func TestMutateStateInTransaction(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -148,8 +148,8 @@ func TestMutateStateInTransaction(t *testing.T) { } func TestMutateStateAfterTransaction(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -175,8 +175,8 @@ func TestMutateStateAfterTransaction(t *testing.T) { } func TestMutateStateReadonly(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -201,8 +201,8 @@ func 
TestMutateStateReadonly(t *testing.T) { } func TestMutateStateInvalidBranch(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -215,8 +215,8 @@ func TestMutateStateInvalidBranch(t *testing.T) { } func TestAbortWith(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -233,8 +233,8 @@ func TestAbortWith(t *testing.T) { } func TestAbortWithUncontrolled(t *testing.T) { - receiver := atesting.NewIDAddr(t, 100) - builder := mock.NewBuilder(context.Background(), receiver) + receiver := atesting2.NewIDAddr(t, 100) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) var a Actor @@ -250,12 +250,12 @@ func TestAbortWithUncontrolled(t *testing.T) { } func TestInspectRuntime(t *testing.T) { - caller := atesting.NewIDAddr(t, 100) - receiver := atesting.NewIDAddr(t, 101) - builder := mock.NewBuilder(context.Background(), receiver) + caller := atesting2.NewIDAddr(t, 100) + receiver := atesting2.NewIDAddr(t, 101) + builder := mock2.NewBuilder(context.Background(), receiver) rt := builder.Build(t) - rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.SetCaller(caller, builtin2.AccountActorCodeID) rt.StateCreate(&State{}) var a Actor diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index 882af7026..876d6a893 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -587,7 +587,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { scratch := make([]byte, 9) - // t.Return (runtime.CBORBytes) (slice) + // t.Return (builtin.CBORBytes) (slice) if len(t.Return) > cbg.ByteArrayMaxLen { return xerrors.Errorf("Byte array in field t.Return was too long") } @@ -631,7 +631,7 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Return (runtime.CBORBytes) (slice) + // t.Return (builtin.CBORBytes) (slice) maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go index 3d447570d..a09f9a8d3 100644 --- a/conformance/corpus_test.go +++ b/conformance/corpus_test.go @@ -11,6 +11,11 @@ import ( "github.com/filecoin-project/test-vectors/schema" ) +var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant){ + schema.ClassMessage: ExecuteMessageVector, + schema.ClassTipset: ExecuteTipsetVector, +} + const ( // EnvSkipConformance, if 1, skips the conformance test suite. EnvSkipConformance = "SKIP_CONFORMANCE" @@ -120,13 +125,16 @@ func TestConformance(t *testing.T) { } // dispatch the execution depending on the vector class. 
- switch vector.Class { - case "message": - ExecuteMessageVector(t, &vector) - case "tipset": - ExecuteTipsetVector(t, &vector) - default: - t.Fatalf("test vector class not supported: %s", vector.Class) + invokee, ok := invokees[vector.Class] + if !ok { + t.Fatalf("unsupported test vector class: %s", vector.Class) + } + + for _, variant := range vector.Pre.Variants { + variant := variant + t.Run(variant.ID, func(t *testing.T) { + invokee(t, &vector, &variant) + }) } }) } diff --git a/conformance/driver.go b/conformance/driver.go index f49022b9c..95b6f2659 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -82,7 +82,7 @@ type ExecuteTipsetResult struct { // This method returns the the receipts root, the poststate root, and the VM // message results. The latter _include_ implicit messages, such as cron ticks // and reward withdrawal per miner. -func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset) (*ExecuteTipsetResult, error) { +func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset, execEpoch abi.ChainEpoch) (*ExecuteTipsetResult, error) { var ( syscalls = vm.Syscalls(ffiwrapper.ProofVerifier) vmRand = NewFixedRand() @@ -121,11 +121,10 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot messages []*types.Message results []*vm.ApplyRet - epoch = abi.ChainEpoch(tipset.Epoch) basefee = abi.NewTokenAmount(tipset.BaseFee.Int64()) ) - postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, epoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { + postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, execEpoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { messages = append(messages, msg) results = append(results, ret) return nil @@ -249,7 +248,7 @@ func BaseFeeOrDefault(basefee *gobig.Int) abi.TokenAmount { // DefaultCirculatingSupply. func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount { if circSupply == nil { - return DefaultBaseFee + return DefaultCirculatingSupply } return big.NewFromGo(circSupply) } diff --git a/conformance/runner.go b/conformance/runner.go index d489ac288..6f9d73305 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -30,11 +30,11 @@ import ( ) // ExecuteMessageVector executes a message-class test vector. -func ExecuteMessageVector(r Reporter, vector *schema.TestVector) { +func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { var ( - ctx = context.Background() - epoch = vector.Pre.Epoch - root = vector.Pre.StateTree.RootCID + ctx = context.Background() + baseEpoch = variant.Epoch + root = vector.Pre.StateTree.RootCID ) // Load the CAR into a new temporary Blockstore. @@ -53,16 +53,16 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector) { r.Fatalf("failed to deserialize message: %s", err) } - // add an epoch if one's set. - if m.Epoch != nil { - epoch = *m.Epoch + // add the epoch offset if one is set. + if m.EpochOffset != nil { + baseEpoch += *m.EpochOffset } // Execute the message. 
var ret *vm.ApplyRet ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{ Preroot: root, - Epoch: abi.ChainEpoch(epoch), + Epoch: abi.ChainEpoch(baseEpoch), Message: msg, BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), @@ -86,10 +86,10 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector) { } // ExecuteTipsetVector executes a tipset-class test vector. -func ExecuteTipsetVector(r Reporter, vector *schema.TestVector) { +func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { var ( ctx = context.Background() - prevEpoch = vector.Pre.Epoch + baseEpoch = abi.ChainEpoch(variant.Epoch) root = vector.Pre.StateTree.RootCID tmpds = ds.NewMapDatastore() ) @@ -105,9 +105,11 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector) { // Apply every tipset. var receiptsIdx int + var prevEpoch = baseEpoch for i, ts := range vector.ApplyTipsets { ts := ts // capture - ret, err := driver.ExecuteTipset(bs, tmpds, root, abi.ChainEpoch(prevEpoch), &ts) + execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset) + ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts, execEpoch) if err != nil { r.Fatalf("failed to apply tipset %d message: %s", i, err) } @@ -122,7 +124,7 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector) { r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) } - prevEpoch = ts.Epoch + prevEpoch = execEpoch root = ret.PostStateRoot } diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index ec8071b57..4ab89d688 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -1,6 +1,7 @@ # Groups * [](#) * [Closing](#Closing) + * [Session](#Session) * [Shutdown](#Shutdown) * [Version](#Version) * [Auth](#Auth) @@ -32,11 +33,14 @@ * [ChainTipSetWeight](#ChainTipSetWeight) * [Client](#Client) * [ClientCalcCommP](#ClientCalcCommP) + * [ClientCancelDataTransfer](#ClientCancelDataTransfer) * [ClientDataTransferUpdates](#ClientDataTransferUpdates) + * [ClientDealPieceCID](#ClientDealPieceCID) * [ClientDealSize](#ClientDealSize) * [ClientFindData](#ClientFindData) * [ClientGenCar](#ClientGenCar) * [ClientGetDealInfo](#ClientGetDealInfo) + * [ClientGetDealStatus](#ClientGetDealStatus) * [ClientGetDealUpdates](#ClientGetDealUpdates) * [ClientHasLocal](#ClientHasLocal) * [ClientImport](#ClientImport) @@ -46,6 +50,7 @@ * [ClientMinerQueryOffer](#ClientMinerQueryOffer) * [ClientQueryAsk](#ClientQueryAsk) * [ClientRemoveImport](#ClientRemoveImport) + * [ClientRestartDataTransfer](#ClientRestartDataTransfer) * [ClientRetrieve](#ClientRetrieve) * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) @@ -68,6 +73,9 @@ * [MinerCreateBlock](#MinerCreateBlock) * [MinerGetBaseInfo](#MinerGetBaseInfo) * [Mpool](#Mpool) + * [MpoolBatchPush](#MpoolBatchPush) + * [MpoolBatchPushMessage](#MpoolBatchPushMessage) + * [MpoolBatchPushUntrusted](#MpoolBatchPushUntrusted) * [MpoolClear](#MpoolClear) * [MpoolGetConfig](#MpoolGetConfig) * [MpoolGetNonce](#MpoolGetNonce) @@ -132,6 +140,7 @@ * [StateCirculatingSupply](#StateCirculatingSupply) * [StateCompute](#StateCompute) * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) + * [StateDecodeParams](#StateDecodeParams) * [StateGetActor](#StateGetActor) * [StateGetReceipt](#StateGetReceipt) * [StateListActors](#StateListActors) @@ -153,9 +162,9 
@@ * [StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower) * [StateMinerProvingDeadline](#StateMinerProvingDeadline) * [StateMinerRecoveries](#StateMinerRecoveries) + * [StateMinerSectorAllocated](#StateMinerSectorAllocated) * [StateMinerSectorCount](#StateMinerSectorCount) * [StateMinerSectors](#StateMinerSectors) - * [StateMsgGasCost](#StateMsgGasCost) * [StateNetworkName](#StateNetworkName) * [StateNetworkVersion](#StateNetworkVersion) * [StateReadState](#StateReadState) @@ -165,6 +174,7 @@ * [StateSectorGetInfo](#StateSectorGetInfo) * [StateSectorPartition](#StateSectorPartition) * [StateSectorPreCommitInfo](#StateSectorPreCommitInfo) + * [StateVMCirculatingSupplyInternal](#StateVMCirculatingSupplyInternal) * [StateVerifiedClientStatus](#StateVerifiedClientStatus) * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey) * [StateVerifierStatus](#StateVerifierStatus) @@ -206,6 +216,15 @@ Inputs: `null` Response: `{}` +### Session + + +Perms: read + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + ### Shutdown @@ -226,7 +245,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 4096, + "APIVersion": 4352, "BlockDelay": 42 } ``` @@ -462,7 +481,10 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } } ``` @@ -840,6 +862,23 @@ Response: } ``` +### ClientCancelDataTransfer +ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + ### ClientDataTransferUpdates There are not yet any comments for this method. @@ -864,6 +903,32 @@ Response: } ``` +### ClientDealPieceCID +ClientCalcCommP calculates the CommP and data size of the specified CID + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + ### ClientDealSize ClientDealSize calculates real deal data size @@ -968,6 +1033,21 @@ Response: } ``` +### ClientGetDealStatus +ClientGetDealStatus returns status given a code + + +Perms: read + +Inputs: +```json +[ + 42 +] +``` + +Response: `"string value"` + ### ClientGetDealUpdates ClientGetDealUpdates returns the status of updated deals @@ -1160,6 +1240,23 @@ Inputs: Response: `{}` +### ClientRestartDataTransfer +ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + ### ClientRetrieve ClientRetrieve initiates the retrieval of a file, as specified in the order. 
@@ -1333,7 +1430,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, 9, [ @@ -1369,7 +1469,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, [ { @@ -1429,7 +1532,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, { "MaxFee": "0" @@ -1457,7 +1563,10 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } } ``` @@ -1648,6 +1757,54 @@ The Mpool methods are for interacting with the message pool. The message pool manages all incoming and outgoing 'messages' going over the network. +### MpoolBatchPush +MpoolBatchPush batch pushes a signed message to mempool. + + +Perms: write + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolBatchPushMessage +MpoolBatchPushMessage batch pushes a unsigned message to mempool. + + +Perms: sign + +Inputs: +```json +[ + null, + { + "MaxFee": "0" + } +] +``` + +Response: `null` + +### MpoolBatchPushUntrusted +MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. + + +Perms: write + +Inputs: +```json +[ + null +] +``` + +Response: `null` + ### MpoolClear MpoolClear clears pending messages from the mpool @@ -1741,11 +1898,17 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" } } ] @@ -1782,7 +1945,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, { "MaxFee": "0" @@ -1803,11 +1969,17 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" } } ``` @@ -1832,11 +2004,17 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" } } ] @@ -1916,11 +2094,17 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" } } } @@ -2928,7 +3112,7 
@@ Response: ## State The State methods are used to query, inspect, and interact with chain state. -All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. +Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. @@ -2997,7 +3181,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, [ { @@ -3013,6 +3200,9 @@ Inputs: Response: ```json { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, "Msg": { "Version": 42, "To": "f01234", @@ -3023,13 +3213,28 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "GasUsed": 9 }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, "ExecutionTrace": { "Msg": { "Version": 42, @@ -3041,7 +3246,10 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "MsgRct": { "ExitCode": 0, @@ -3094,7 +3302,8 @@ Response: ``` ### StateCirculatingSupply -StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset +StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. +This is not used anywhere in the protocol itself, and is only for external consumption. Perms: read @@ -3113,16 +3322,7 @@ Inputs: ] ``` -Response: -```json -{ - "FilVested": "0", - "FilMined": "0", - "FilBurnt": "0", - "FilLocked": "0", - "FilCirculating": "0" -} -``` +Response: `"0"` ### StateCompute StateCompute is a flexible command that applies the given messages on the given tipset. @@ -3188,6 +3388,31 @@ Response: } ``` +### StateDecodeParams +StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 1, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetActor StateGetActor returns the indicated actor's nonce and balance. 
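The following is an illustrative sketch only (not part of this patch) of how the new `StateDecodeParams` endpoint documented above might be called from Go. It assumes the Go binding mirrors the JSON inputs shown there (recipient address, method number, raw params, tipset key):

```go
package example

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// decodeParams pretty-prints the parameters of an observed message using
// StateDecodeParams. fapi is assumed to be an already-connected full node
// client; passing types.EmptyTSK asks the node to use the heaviest tipset.
func decodeParams(ctx context.Context, fapi api.FullNode, msg *types.Message) error {
	decoded, err := fapi.StateDecodeParams(ctx, msg.To, msg.Method, msg.Params, types.EmptyTSK)
	if err != nil {
		return fmt.Errorf("failed to decode params: %w", err)
	}
	out, err := json.MarshalIndent(decoded, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}
```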
@@ -3287,16 +3512,8 @@ Inputs: ```json [ { - "Version": 42, "To": "f01234", - "From": "f01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "From": "f01234" }, [ { @@ -3850,6 +4067,30 @@ Response: ] ``` +### StateMinerSectorAllocated +StateMinerSectorAllocated checks if a sector is allocated + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + ### StateMinerSectorCount StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set @@ -3906,45 +4147,6 @@ Inputs: Response: `null` -### StateMsgGasCost -StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Message": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "GasUsed": "0", - "BaseFeeBurn": "0", - "OverEstimationBurn": "0", - "MinerPenalty": "0", - "MinerTip": "0", - "Refund": "0", - "TotalCost": "0" -} -``` - ### StateNetworkName StateNetworkName returns the name of the network the node is synced to @@ -3975,7 +4177,7 @@ Inputs: ] ``` -Response: `4` +Response: `6` ### StateReadState StateReadState returns the indicated actor's state. @@ -4007,7 +4209,8 @@ Response: ``` ### StateReplay -StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. +StateReplay replays a given message, assuming it was included in a block in the specified tipset. +If no tipset key is provided, the appropriate tipset is looked up. Perms: read @@ -4032,6 +4235,9 @@ Inputs: Response: ```json { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, "Msg": { "Version": 42, "To": "f01234", @@ -4042,13 +4248,28 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "GasUsed": 9 }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, "ExecutionTrace": { "Msg": { "Version": 42, @@ -4060,7 +4281,10 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "MsgRct": { "ExitCode": 0, @@ -4265,6 +4489,38 @@ Response: } ``` +### StateVMCirculatingSupplyInternal +StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. +This is the value reported by the runtime interface to actors code. 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "FilVested": "0", + "FilMined": "0", + "FilBurnt": "0", + "FilLocked": "0", + "FilCirculating": "0" +} +``` + ### StateVerifiedClientStatus StateVerifiedClientStatus returns the data cap for the given address. Returns nil if there is no entry in the data cap table for the @@ -4710,7 +4966,7 @@ Inputs: Response: ```json { - "Type": "string value", + "Type": "bls", "PrivateKey": "Ynl0ZSBhcnJheQ==" } ``` @@ -4740,7 +4996,7 @@ Inputs: ```json [ { - "Type": "string value", + "Type": "bls", "PrivateKey": "Ynl0ZSBhcnJheQ==" } ] @@ -4760,6 +5016,8 @@ Response: `null` ### WalletNew WalletNew creates a new address in the wallet with the given sigType. +Available key types: bls, secp256k1, secp256k1-ledger +Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated Perms: write @@ -4767,7 +5025,7 @@ Perms: write Inputs: ```json [ - 2 + "bls" ] ``` @@ -4830,7 +5088,10 @@ Inputs: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } } ] ``` @@ -4848,11 +5109,17 @@ Response: "GasFeeCap": "0", "GasPremium": "0", "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } }, "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" } } ``` diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index f640612a1..0226d0be6 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit f640612a1a1f7a2dd8b3a49e1531db0aa0f63447 +Subproject commit 0226d0be6f0ec441e687512cd833040414437351 diff --git a/extern/oni b/extern/oni index dbee44e4f..10ed9ef57 160000 --- a/extern/oni +++ b/extern/oni @@ -1 +1 @@ -Subproject commit dbee44e4f940a502971f17116ccbba61ceaf2674 +Subproject commit 10ed9ef576836186de3b8513c03cdc3fb18c44ed diff --git a/extern/sector-storage/cbor_gen.go b/extern/sector-storage/cbor_gen.go new file mode 100644 index 000000000..0db97f2c9 --- /dev/null +++ b/extern/sector-storage/cbor_gen.go @@ -0,0 +1,492 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
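// Editorial note, illustration only (not part of the generated file): output
// like the code below typically comes from a small `gen` program built on
// github.com/whyrusleeping/cbor-gen, roughly along these lines (the output
// path and the exact type list are assumptions for illustration; the
// authoritative list lives in the package's own gen entrypoint):
//
//	err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sectorstorage",
//		sectorstorage.Call{},
//		sectorstorage.WorkState{},
//		sectorstorage.WorkID{},
//	)
//	if err != nil {
//		panic(err)
//	}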
+ +package sectorstorage + +import ( + "fmt" + "io" + + sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +func (t *Call) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ID (storiface.CallID) (struct) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := t.ID.MarshalCBOR(w); err != nil { + return err + } + + // t.RetType (sectorstorage.ReturnType) (string) + if len("RetType") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RetType\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("RetType"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("RetType")); err != nil { + return err + } + + if len(t.RetType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.RetType was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.RetType))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.RetType)); err != nil { + return err + } + + // t.State (sectorstorage.CallState) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Result (sectorstorage.ManyBytes) (struct) + if len("Result") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Result\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Result"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Result")); err != nil { + return err + } + + if err := t.Result.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Call) UnmarshalCBOR(r io.Reader) error { + *t = Call{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Call: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (storiface.CallID) (struct) + case "ID": + + { + + if err := t.ID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ID: %w", err) + } + + } + // t.RetType (sectorstorage.ReturnType) (string) + case "RetType": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.RetType = ReturnType(sval) + } + // t.State (sectorstorage.CallState) (uint64) + case "State": + + { + + maj, extra, err = 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = CallState(extra) + + } + // t.Result (sectorstorage.ManyBytes) (struct) + case "Result": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Result = new(ManyBytes) + if err := t.Result.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Result pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *WorkState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ID (sectorstorage.WorkID) (struct) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := t.ID.MarshalCBOR(w); err != nil { + return err + } + + // t.Status (sectorstorage.WorkStatus) (string) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if len(t.Status) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Status was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Status))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Status)); err != nil { + return err + } + + // t.WorkerCall (storiface.CallID) (struct) + if len("WorkerCall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WorkerCall\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerCall"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WorkerCall")); err != nil { + return err + } + + if err := t.WorkerCall.MarshalCBOR(w); err != nil { + return err + } + + // t.WorkError (string) (string) + if len("WorkError") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WorkError\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkError"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WorkError")); err != nil { + return err + } + + if len(t.WorkError) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.WorkError was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkError))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.WorkError)); err != nil { + return err + } + return nil +} + +func (t *WorkState) UnmarshalCBOR(r io.Reader) error { + *t = WorkState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("WorkState: map struct too large (%d)", 
extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (sectorstorage.WorkID) (struct) + case "ID": + + { + + if err := t.ID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ID: %w", err) + } + + } + // t.Status (sectorstorage.WorkStatus) (string) + case "Status": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Status = WorkStatus(sval) + } + // t.WorkerCall (storiface.CallID) (struct) + case "WorkerCall": + + { + + if err := t.WorkerCall.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.WorkerCall: %w", err) + } + + } + // t.WorkError (string) (string) + case "WorkError": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.WorkError = string(sval) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *WorkID) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Method (sealtasks.TaskType) (string) + if len("Method") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Method\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Method"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Method")); err != nil { + return err + } + + if len(t.Method) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Method was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Method))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Method)); err != nil { + return err + } + + // t.Params (string) (string) + if len("Params") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Params\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Params"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Params")); err != nil { + return err + } + + if len(t.Params) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Params was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Params))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Params)); err != nil { + return err + } + return nil +} + +func (t *WorkID) UnmarshalCBOR(r io.Reader) error { + *t = WorkID{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("WorkID: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Method (sealtasks.TaskType) (string) + case "Method": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Method = sealtasks.TaskType(sval) + } + // t.Params (string) (string) + case "Params": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil 
{ + return err + } + + t.Params = string(sval) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go index 31a1a3690..c4e1364ad 100644 --- a/extern/sector-storage/faults.go +++ b/extern/sector-storage/faults.go @@ -9,19 +9,20 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" + + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) // FaultTracker TODO: Track things more actively type FaultTracker interface { - CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) } // CheckProvable returns unprovable sectors -func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) { +func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) { var bad []abi.SectorID - ssize, err := spt.SectorSize() + ssize, err := pp.SectorSize() if err != nil { return nil, err } @@ -32,7 +33,7 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof ctx, cancel := context.WithCancel(ctx) defer cancel() - locked, err := m.index.StorageTryLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTNone) + locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone) if err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } @@ -43,7 +44,7 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof return nil } - lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) bad = append(bad, sector) diff --git a/extern/sector-storage/ffiwrapper/basicfs/fs.go b/extern/sector-storage/ffiwrapper/basicfs/fs.go index 00aa49b98..7ae303d9c 100644 --- a/extern/sector-storage/ffiwrapper/basicfs/fs.go +++ b/extern/sector-storage/ffiwrapper/basicfs/fs.go @@ -8,13 +8,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type sectorFile struct { abi.SectorID - stores.SectorFileType + storiface.SectorFileType } type Provider struct { @@ -24,24 +23,24 @@ type Provider struct { waitSector map[sectorFile]chan struct{} } -func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) { - if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint - return stores.SectorPaths{}, nil, err +func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) 
{ + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err } - if err := os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint - return stores.SectorPaths{}, nil, err + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err } - if err := os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint - return stores.SectorPaths{}, nil, err + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err } done := func() {} - out := stores.SectorPaths{ + out := storiface.SectorPaths{ ID: id, } - for _, fileType := range stores.PathTypes { + for _, fileType := range storiface.PathTypes { if !existing.Has(fileType) && !allocate.Has(fileType) { continue } @@ -61,10 +60,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing case ch <- struct{}{}: case <-ctx.Done(): done() - return stores.SectorPaths{}, nil, ctx.Err() + return storiface.SectorPaths{}, nil, ctx.Err() } - path := filepath.Join(b.Root, fileType.String(), stores.SectorName(id)) + path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id)) prevDone := done done = func() { @@ -75,11 +74,11 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing if !allocate.Has(fileType) { if _, err := os.Stat(path); os.IsNotExist(err) { done() - return stores.SectorPaths{}, nil, storiface.ErrSectorNotFound + return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound } } - stores.SetPathByType(&out, fileType, path) + storiface.SetPathByType(&out, fileType, path) } return out, done, nil diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index bae6cafb4..b48b0bfd5 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -21,7 +21,6 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" ) @@ -80,9 +79,9 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } }() - var stagedPath stores.SectorPaths + var stagedPath storiface.SectorPaths if len(existingPieceSizes) == 0 { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -92,7 +91,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } } else { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: 
%w", err) } @@ -199,12 +198,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s maxPieceSize := abi.PaddedPieceSize(sb.ssize) // try finding existing - unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) + unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) var pf *partialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): - unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage) + unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTNone, storiface.FTUnsealed, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err) } @@ -241,7 +240,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } - srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage) + srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) } @@ -358,7 +357,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { - path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) + path, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) if err != nil { return false, xerrors.Errorf("acquire unsealed sector path: %w", err) } @@ -410,7 +409,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se } func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -467,7 +466,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke } func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -485,7 +484,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, 
stores.PathSealing) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) } @@ -535,7 +534,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } @@ -575,7 +574,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index bb26adb77..5ae5cec67 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -30,7 +30,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) func init() { @@ -93,7 +93,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { t.Fatalf("%+v", err) } - ok, err := ProofVerifier.VerifySeal(saproof.SealVerifyInfo{ + ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{ SectorID: s.id, SealedCID: s.cids.Sealed, SealProof: sealProofType, @@ -125,7 +125,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal("read wrong bytes") } - p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage) + p, sd, err := sp.AcquireSector(context.TODO(), si, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) if err != nil { t.Fatal(err) } @@ -171,9 +171,9 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} - sis := make([]saproof.SectorInfo, len(seals)) + sis := make([]proof2.SectorInfo, len(seals)) for i, s := range seals { - sis[i] = saproof.SectorInfo{ + sis[i] = proof2.SectorInfo{ SealProof: sealProofType, SectorNumber: s.id.Number, SealedCID: s.cids.Sealed, @@ -191,7 +191,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { t.Fatalf("%+v", err) } - ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), saproof.WindowPoStVerifyInfo{ + ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof2.WindowPoStVerifyInfo{ Randomness: randomness, Proofs: proofs, ChallengedSectors: sis, @@ -206,7 +206,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { } func corrupt(t *testing.T, sealer *Sealer, id 
abi.SectorID) { - paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, stores.FTSealed, 0, stores.PathStorage) + paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage) require.NoError(t, err) defer done() diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index 318dbd2b0..b67f9c595 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -12,13 +12,12 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type Validator interface { - CanCommit(sector stores.SectorPaths) (bool, error) - CanProve(sector stores.SectorPaths) (bool, error) + CanCommit(sector storiface.SectorPaths) (bool, error) + CanProve(sector storiface.SectorPaths) (bool, error) } type StorageSealer interface { @@ -35,9 +34,9 @@ type Storage interface { } type Verifier interface { - VerifySeal(proof.SealVerifyInfo) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof2.SealVerifyInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } @@ -45,7 +44,7 @@ type Verifier interface { type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists - AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) + AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) } var _ SectorProvider = &basicfs.Provider{} diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index d6c0ae35f..9dab7103e 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -5,20 +5,17 @@ package ffiwrapper import ( "context" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" - + "go.opencensus.io/trace" "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" - ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-state-types/abi" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" - - "go.opencensus.io/trace" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo 
[]proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { @@ -32,7 +29,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { @@ -57,7 +54,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof2.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -79,7 +76,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} - paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) if err != nil { log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) skipped = append(skipped, sid) @@ -110,11 +107,11 @@ type proofVerifier struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof2.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -122,7 +119,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPo return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/localworker.go b/extern/sector-storage/localworker.go 
deleted file mode 100644 index b1193a2e2..000000000 --- a/extern/sector-storage/localworker.go +++ /dev/null @@ -1,306 +0,0 @@ -package sectorstorage - -import ( - "context" - "io" - "os" - "runtime" - - "github.com/elastic/go-sysinfo" - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-state-types/abi" - storage2 "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" -) - -var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} - -type WorkerConfig struct { - SealProof abi.RegisteredSealProof - TaskTypes []sealtasks.TaskType - NoSwap bool -} - -type LocalWorker struct { - scfg *ffiwrapper.Config - storage stores.Store - localStore *stores.Local - sindex stores.SectorIndex - noSwap bool - - acceptTasks map[sealtasks.TaskType]struct{} -} - -func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker { - acceptTasks := map[sealtasks.TaskType]struct{}{} - for _, taskType := range wcfg.TaskTypes { - acceptTasks[taskType] = struct{}{} - } - - return &LocalWorker{ - scfg: &ffiwrapper.Config{ - SealProofType: wcfg.SealProof, - }, - storage: store, - localStore: local, - sindex: sindex, - noSwap: wcfg.NoSwap, - - acceptTasks: acceptTasks, - } -} - -type localWorkerPathProvider struct { - w *LocalWorker - op stores.AcquireMode -} - -func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { - - paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op) - if err != nil { - return stores.SectorPaths{}, nil, err - } - - releaseStorage, err := l.w.localStore.Reserve(ctx, sector, l.w.scfg.SealProofType, allocate, storageIDs, stores.FSOverheadSeal) - if err != nil { - return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) - } - - log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) - - return paths, func() { - releaseStorage() - - for _, fileType := range pathTypes { - if fileType&allocate == 0 { - continue - } - - sid := stores.PathByType(storageIDs, fileType) - - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == stores.AcquireMove); err != nil { - log.Errorf("declare sector error: %+v", err) - } - } - }, nil -} - -func (l *LocalWorker) sb() (ffiwrapper.Storage, error) { - return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) -} - -func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { - sb, err := l.sb() - if err != nil { - return err - } - - return sb.NewSector(ctx, sector) -} - -func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { - sb, err := l.sb() - if err != nil { - return abi.PieceInfo{}, err - } - - return sb.AddPiece(ctx, sector, epcs, sz, r) -} - -func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType 
stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { - _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, ptype) - if err != nil { - return err - } - done() - return nil -} - -func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) { - { - // cleanup previous failed attempts if they exist - if err := l.storage.Remove(ctx, sector, stores.FTSealed, true); err != nil { - return nil, xerrors.Errorf("cleaning up sealed data: %w", err) - } - - if err := l.storage.Remove(ctx, sector, stores.FTCache, true); err != nil { - return nil, xerrors.Errorf("cleaning up cache data: %w", err) - } - } - - sb, err := l.sb() - if err != nil { - return nil, err - } - - return sb.SealPreCommit1(ctx, sector, ticket, pieces) -} - -func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) { - sb, err := l.sb() - if err != nil { - return storage2.SectorCids{}, err - } - - return sb.SealPreCommit2(ctx, sector, phase1Out) -} - -func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) { - sb, err := l.sb() - if err != nil { - return nil, err - } - - return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids) -} - -func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) { - sb, err := l.sb() - if err != nil { - return nil, err - } - - return sb.SealCommit2(ctx, sector, phase1Out) -} - -func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) error { - sb, err := l.sb() - if err != nil { - return err - } - - if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil { - return xerrors.Errorf("finalizing sector: %w", err) - } - - if len(keepUnsealed) == 0 { - if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil { - return xerrors.Errorf("removing unsealed data: %w", err) - } - } - - return nil -} - -func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) error { - return xerrors.Errorf("implement me") -} - -func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { - var err error - - if rerr := l.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil { - err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) - } - if rerr := l.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil { - err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) - } - if rerr := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil { - err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) - } - - return err -} - -func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error { - if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, types); err != nil { - return xerrors.Errorf("moving sealed data to storage: %w", err) - } - - return nil -} - -func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size 
abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { - sb, err := l.sb() - if err != nil { - return err - } - - if err := sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { - return xerrors.Errorf("unsealing sector: %w", err) - } - - if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed); err != nil { - return xerrors.Errorf("removing source data: %w", err) - } - - if err := l.storage.RemoveCopies(ctx, sector, stores.FTCache); err != nil { - return xerrors.Errorf("removing source data: %w", err) - } - - return nil -} - -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { - sb, err := l.sb() - if err != nil { - return false, err - } - - return sb.ReadPiece(ctx, writer, sector, index, size) -} - -func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { - return l.acceptTasks, nil -} - -func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { - return l.localStore.Local(ctx) -} - -func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { - hostname, err := os.Hostname() // TODO: allow overriding from config - if err != nil { - panic(err) - } - - gpus, err := ffi.GetGPUDevices() - if err != nil { - log.Errorf("getting gpu devices failed: %+v", err) - } - - h, err := sysinfo.Host() - if err != nil { - return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) - } - - mem, err := h.Memory() - if err != nil { - return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) - } - - memSwap := mem.VirtualTotal - if l.noSwap { - memSwap = 0 - } - - return storiface.WorkerInfo{ - Hostname: hostname, - Resources: storiface.WorkerResources{ - MemPhysical: mem.Total, - MemSwap: memSwap, - MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process - CPUs: uint64(runtime.NumCPU()), - GPUs: gpus, - }, - }, nil -} - -func (l *LocalWorker) Closing(ctx context.Context) (<-chan struct{}, error) { - return make(chan struct{}), nil -} - -func (l *LocalWorker) Close() error { - return nil -} - -var _ Worker = &LocalWorker{} diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 73a5eb51e..9a41dcd44 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -5,7 +5,9 @@ import ( "errors" "io" "net/http" + "sync" + "github.com/google/uuid" "github.com/hashicorp/go-multierror" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" @@ -13,6 +15,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -29,13 +32,7 @@ var ErrNoWorkers = errors.New("no suitable workers found") type URLs []string type Worker interface { - ffiwrapper.StorageSealer - - MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error - - Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error - UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) + storiface.WorkerCalls TaskTypes(context.Context) 
(map[sealtasks.TaskType]struct{}, error) @@ -44,10 +41,9 @@ type Worker interface { Info(context.Context) (storiface.WorkerInfo, error) - // returns channel signalling worker shutdown - Closing(context.Context) (<-chan struct{}, error) + Session(context.Context) (uuid.UUID, error) - Close() error + Close() error // TODO: do we need this? } type SectorManager interface { @@ -57,10 +53,12 @@ type SectorManager interface { ffiwrapper.StorageSealer storage.Prover + storiface.WorkerReturn FaultTracker } -type WorkerID uint64 +type WorkerID uuid.UUID // worker session UUID +var ClosedWorkerID = uuid.UUID{} type Manager struct { scfg *ffiwrapper.Config @@ -74,6 +72,21 @@ type Manager struct { sched *scheduler storage.Prover + + workLk sync.Mutex + work *statestore.StateStore + + callToWork map[storiface.CallID]WorkID + // used when we get an early return and there's no callToWork mapping + callRes map[storiface.CallID]chan result + + results map[WorkID]result + waitRes map[WorkID]chan struct{} +} + +type result struct { + r interface{} + err error } type SealerConfig struct { @@ -89,13 +102,16 @@ type SealerConfig struct { type StorageAuth http.Header -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth) (*Manager, error) { +type WorkerStateStore *statestore.StateStore +type ManagerStateStore *statestore.StateStore + +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err } - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si, spt: cfg.SealProofType}, cfg) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } @@ -114,8 +130,16 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg sched: newScheduler(cfg.SealProofType), Prover: prover, + + work: mss, + callToWork: map[storiface.CallID]WorkID{}, + callRes: map[storiface.CallID]chan result{}, + results: map[WorkID]result{}, + waitRes: map[WorkID]chan struct{}{}, } + m.setupWorkTracker() + go m.sched.runSched() localTasks := []sealtasks.TaskType{ @@ -140,7 +164,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ SealProof: cfg.SealProofType, TaskTypes: localTasks, - }, stor, lstor, si)) + }, stor, lstor, si, m, wss)) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) } @@ -167,21 +191,7 @@ func (m *Manager) AddLocalStorage(ctx context.Context, path string) error { } func (m *Manager) AddWorker(ctx context.Context, w Worker) error { - info, err := w.Info(ctx) - if err != nil { - return xerrors.Errorf("getting worker info: %w", err) - } - - m.sched.newWorkers <- &workerHandle{ - w: w, - wt: &workTracker{ - running: map[uint64]storiface.WorkerJob{}, - }, - info: info, - preparing: &activeResources{}, - active: &activeResources{}, - } - return nil + return m.sched.runWorker(ctx, w) } func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -197,9 +207,21 @@ func schedNop(context.Context, Worker) error { return nil } -func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) func(context.Context, Worker) 
error { +func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { - return worker.Fetch(ctx, sector, ft, ptype, am) + _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am)) + return err + } +} + +func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { + return func(ctx context.Context, w Worker) error { + r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size)) + if err != nil { + return err + } + *rok = r.(bool) + return nil } } @@ -209,13 +231,13 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTNone); err != nil { + if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil { returnErr = xerrors.Errorf("acquiring read sector lock: %w", err) return } // passing 0 spt because we only need it when allowFetch is true - best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false) + best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) if err != nil { returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) return @@ -225,17 +247,15 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect if foundUnsealed { // append to existing // There is unsealed sector, see if we can read from it - selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) + selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) - return err - }) + err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), + m.readPiece(sink, sector, offset, size, &readOk)) if err != nil { returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err) } } else { - selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) } return } @@ -251,17 +271,17 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil { + if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil { return xerrors.Errorf("acquiring unseal sector lock: %w", err) } unsealFetch := func(ctx context.Context, worker Worker) error { - if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil { + if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } if foundUnsealed { - 
if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil { + if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil { return xerrors.Errorf("copy unsealed sector data: %w", err) } } @@ -272,18 +292,18 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size) } err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error { - return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed) + // TODO: make restartable + _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)) + return err }) if err != nil { return err } - selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) + selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) - return err - }) + err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), + m.readPiece(sink, sector, offset, size, &readOk)) if err != nil { return xerrors.Errorf("reading piece from sealed sector: %w", err) } @@ -304,25 +324,25 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTUnsealed); err != nil { + if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err) } var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) } else { // use existing - selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) + selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) } var out abi.PieceInfo err = m.sched.Schedule(ctx, sector, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error { - p, err := w.AddPiece(ctx, sector, existingPieces, sz, r) + p, err := m.waitSimpleCall(ctx)(w.AddPiece(ctx, sector, existingPieces, sz, r)) if err != nil { return err } - out = p + out = p.(abi.PieceInfo) return nil }) @@ -333,129 +353,234 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache); err != nil { + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTPreCommit1, sector, ticket, pieces) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + out = p.(storage.PreCommit1Out) + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := 
m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil { return nil, xerrors.Errorf("acquiring sector lock: %w", err) } // TODO: also consider where the unsealed data sits - selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) + selector := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathSealing) - err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces)) if err != nil { return err } - out = p + + waitRes() return nil }) + if err != nil { + return nil, err + } - return out, err + return out, waitErr } func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil { + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTPreCommit2, sector, phase1Out) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + out = p.(storage.SectorCids) + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) } - selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true) + selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true) - err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - p, err := w.SealPreCommit2(ctx, sector, phase1Out) + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) if err != nil { return err } - out = p + + waitRes() return nil }) - return out, err + if err != nil { + return storage.SectorCids{}, err + } + + return out, waitErr } func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil { + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit1, sector, ticket, seed, pieces, cids) + if err != nil { + return storage.Commit1Out{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) 
+ if werr != nil { + waitErr = werr + return + } + out = p.(storage.Commit1Out) + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err) } // NOTE: We set allowFetch to false in so that we always execute on a worker // with direct access to the data. We want to do that because this step is // generally very cheap / fast, and transferring data is not worth the effort - selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false) + selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) - err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) + err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) if err != nil { return err } - out = p + + waitRes() return nil }) - return out, err + if err != nil { + return nil, err + } + + return out, waitErr } func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) { + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out) + if err != nil { + return storage.Proof{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + out = p.(storage.Proof) + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + selector := newTaskSelector() err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { - p, err := w.SealCommit2(ctx, sector, phase1Out) + err := m.startWork(ctx, wk)(w.SealCommit2(ctx, sector, phase1Out)) if err != nil { return err } - out = p + + waitRes() return nil }) - return out, err + if err != nil { + return nil, err + } + + return out, waitErr } func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } - unsealed := stores.FTUnsealed + unsealed := storiface.FTUnsealed { - unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false) + unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) if err != nil { return xerrors.Errorf("finding unsealed sector: %w", err) } if len(unsealedStores) == 0 { // Is some edge-cases unsealed sector may not exist already, that's fine - unsealed = stores.FTNone + unsealed = storiface.FTNone } } - selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false) + selector := 
newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, - schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove), + m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - return w.FinalizeSector(ctx, sector, keepUnsealed) + _, err := m.waitSimpleCall(ctx)(w.FinalizeSector(ctx, sector, keepUnsealed)) + return err }) if err != nil { return err } - fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) + fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathStorage) moveUnsealed := unsealed { if len(keepUnsealed) == 0 { - moveUnsealed = stores.FTNone + moveUnsealed = storiface.FTNone } } err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, - schedFetch(sector, stores.FTCache|stores.FTSealed|moveUnsealed, stores.PathStorage, stores.AcquireMove), + m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|moveUnsealed, storiface.PathStorage, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - return w.MoveStorage(ctx, sector, stores.FTCache|stores.FTSealed|moveUnsealed) + _, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed|moveUnsealed)) + return err }) if err != nil { return xerrors.Errorf("moving sector to storage: %w", err) @@ -473,25 +598,69 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } var err error - if rerr := m.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } return err } +func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { + return m.returnResult(callID, pi, err) +} + +func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { + return m.returnResult(callID, p1o, err) +} + +func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { + return m.returnResult(callID, sealed, err) +} + +func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { + return m.returnResult(callID, out, err) +} + +func (m *Manager) ReturnSealCommit2(ctx context.Context, callID 
storiface.CallID, proof storage.Proof, err string) error { + return m.returnResult(callID, proof, err) +} + +func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { + return m.returnResult(callID, nil, err) +} + +func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { + return m.returnResult(callID, nil, err) +} + +func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { + return m.returnResult(callID, nil, err) +} + +func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { + return m.returnResult(callID, nil, err) +} + +func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { + return m.returnResult(callID, ok, err) +} + +func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { + return m.returnResult(callID, nil, err) +} + func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { l, err := m.localStore.Local(ctx) if err != nil { @@ -510,7 +679,15 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } -func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) { +func (m *Manager) SchedDiag(ctx context.Context, doSched bool) (interface{}, error) { + if doSched { + select { + case m.sched.workerChange <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + } + return m.sched.Info(ctx) } diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go new file mode 100644 index 000000000..8a1c1e4f9 --- /dev/null +++ b/extern/sector-storage/manager_calltracker.go @@ -0,0 +1,389 @@ +package sectorstorage + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type WorkID struct { + Method sealtasks.TaskType + Params string // json [...params] +} + +func (w WorkID) String() string { + return fmt.Sprintf("%s(%s)", w.Method, w.Params) +} + +var _ fmt.Stringer = &WorkID{} + +type WorkStatus string + +const ( + wsStarted WorkStatus = "started" // task started, not scheduled/running on a worker yet + wsRunning WorkStatus = "running" // task running on a worker, waiting for worker return + wsDone WorkStatus = "done" // task returned from the worker, results available +) + +type WorkState struct { + ID WorkID + + Status WorkStatus + + WorkerCall storiface.CallID // Set when entering wsRunning + WorkError string // Status = wsDone, set when failed to start work +} + +func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) { + pb, err := json.Marshal(params) + if err != nil { + return WorkID{}, xerrors.Errorf("marshaling work params: %w", err) + } + + if len(pb) > 256 { + s := sha256.Sum256(pb) + pb = []byte(hex.EncodeToString(s[:])) + } + + return WorkID{ + Method: method, + Params: string(pb), + }, nil +} + +func (m *Manager) setupWorkTracker() { + m.workLk.Lock() + defer m.workLk.Unlock() + + var ids []WorkState + if err := m.work.List(&ids); err != nil { + log.Error("getting work IDs") // quite bad + return + } + + for _, st := range ids { + wid := st.ID + + if os.Getenv("LOTUS_MINER_ABORT_UNFINISHED_WORK") == "1" { + st.Status = 
wsDone + } + + switch st.Status { + case wsStarted: + log.Warnf("dropping non-running work %s", wid) + + if err := m.work.Get(wid).End(); err != nil { + log.Errorf("cleaning up work state for %s", wid) + } + case wsDone: + // realistically this shouldn't ever happen as we return results + // immediately after getting them + log.Warnf("dropping done work, no result, wid %s", wid) + + if err := m.work.Get(wid).End(); err != nil { + log.Errorf("cleaning up work state for %s", wid) + } + case wsRunning: + m.callToWork[st.WorkerCall] = wid + } + } +} + +// returns wait=true when the task is already tracked/running +func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params ...interface{}) (wid WorkID, wait bool, cancel func(), err error) { + wid, err = newWorkID(method, params) + if err != nil { + return WorkID{}, false, nil, xerrors.Errorf("creating WorkID: %w", err) + } + + m.workLk.Lock() + defer m.workLk.Unlock() + + have, err := m.work.Has(wid) + if err != nil { + return WorkID{}, false, nil, xerrors.Errorf("failed to check if the task is already tracked: %w", err) + } + + if !have { + err := m.work.Begin(wid, &WorkState{ + ID: wid, + Status: wsStarted, + }) + if err != nil { + return WorkID{}, false, nil, xerrors.Errorf("failed to track task start: %w", err) + } + + return wid, false, func() { + m.workLk.Lock() + defer m.workLk.Unlock() + + have, err := m.work.Has(wid) + if err != nil { + log.Errorf("cancel: work has error: %+v", err) + return + } + + if !have { + return // expected / happy path + } + + var ws WorkState + if err := m.work.Get(wid).Get(&ws); err != nil { + log.Errorf("cancel: get work %s: %+v", wid, err) + return + } + + switch ws.Status { + case wsStarted: + log.Warnf("canceling started (not running) work %s", wid) + + if err := m.work.Get(wid).End(); err != nil { + log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err) + return + } + case wsDone: + // TODO: still remove?
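The keying scheme above is what makes the call tracker restart-safe: getWork derives a deterministic WorkID from the task type plus a JSON encoding of the call parameters (hashed when the encoding is large), so a repeated SealPreCommit1 or SealCommit2 call after a miner restart attaches to the work already in flight instead of starting it over. A minimal, self-contained sketch of how the IDs are built; the real code persists WorkState entries in a statestore, and the names below are illustrative rather than the actual API:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// workID mirrors the WorkID idea: a task type plus a canonical encoding of the
// parameters. Oversized parameter blobs are replaced by their sha256 hex so
// the ID stays short enough to use as a datastore key.
type workID struct {
	Method string
	Params string
}

func newWorkID(method string, params ...interface{}) (workID, error) {
	pb, err := json.Marshal(params)
	if err != nil {
		return workID{}, fmt.Errorf("marshaling work params: %w", err)
	}
	if len(pb) > 256 {
		s := sha256.Sum256(pb)
		pb = []byte(hex.EncodeToString(s[:]))
	}
	return workID{Method: method, Params: string(pb)}, nil
}

func main() {
	// Identical method + params always map to the same ID, which is what lets
	// a restarted manager find the task it was already running.
	a, _ := newWorkID("seal/v0/precommit/1", 1000, 1, "ticket")
	b, _ := newWorkID("seal/v0/precommit/1", 1000, 1, "ticket")
	fmt.Println(a == b) // true
}
```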
+ log.Warn("cancel called on work %s in 'done' state", wid) + case wsRunning: + log.Warn("cancel called on work %s in 'running' state (manager shutting down?)", wid) + } + + }, nil + } + + // already started + + return wid, true, func() { + // TODO + }, nil +} + +func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storiface.CallID, err error) error { + return func(callID storiface.CallID, err error) error { + m.workLk.Lock() + defer m.workLk.Unlock() + + if err != nil { + merr := m.work.Get(wk).Mutate(func(ws *WorkState) error { + ws.Status = wsDone + ws.WorkError = err.Error() + return nil + }) + + if merr != nil { + return xerrors.Errorf("failed to start work and to track the error; merr: %+v, err: %w", merr, err) + } + return err + } + + err = m.work.Get(wk).Mutate(func(ws *WorkState) error { + _, ok := m.results[wk] + if ok { + log.Warn("work returned before we started tracking it") + ws.Status = wsDone + } else { + ws.Status = wsRunning + } + ws.WorkerCall = callID + return nil + }) + if err != nil { + return xerrors.Errorf("registering running work: %w", err) + } + + m.callToWork[callID] = wk + + return nil + } +} + +func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) { + m.workLk.Lock() + + var ws WorkState + if err := m.work.Get(wid).Get(&ws); err != nil { + m.workLk.Unlock() + return nil, xerrors.Errorf("getting work status: %w", err) + } + + if ws.Status == wsStarted { + m.workLk.Unlock() + return nil, xerrors.Errorf("waitWork called for work in 'started' state") + } + + // sanity check + wk := m.callToWork[ws.WorkerCall] + if wk != wid { + m.workLk.Unlock() + return nil, xerrors.Errorf("wrong callToWork mapping for call %s; expected %s, got %s", ws.WorkerCall, wid, wk) + } + + // make sure we don't have the result ready + cr, ok := m.callRes[ws.WorkerCall] + if ok { + delete(m.callToWork, ws.WorkerCall) + + if len(cr) == 1 { + err := m.work.Get(wk).End() + if err != nil { + m.workLk.Unlock() + // Not great, but not worth discarding potentially multi-hour computation over this + log.Errorf("marking work as done: %+v", err) + } + + res := <-cr + delete(m.callRes, ws.WorkerCall) + + m.workLk.Unlock() + return res.r, res.err + } + + m.workLk.Unlock() + return nil, xerrors.Errorf("something else in waiting on callRes") + } + + done := func() { + delete(m.results, wid) + + _, ok := m.callToWork[ws.WorkerCall] + if ok { + delete(m.callToWork, ws.WorkerCall) + } + + err := m.work.Get(wk).End() + if err != nil { + // Not great, but not worth discarding potentially multi-hour computation over this + log.Errorf("marking work as done: %+v", err) + } + } + + // the result can already be there if the work was running, manager restarted, + // and the worker has delivered the result before we entered waitWork + res, ok := m.results[wid] + if ok { + done() + m.workLk.Unlock() + return res.r, res.err + } + + ch, ok := m.waitRes[wid] + if !ok { + ch = make(chan struct{}) + m.waitRes[wid] = ch + } + + m.workLk.Unlock() + + select { + case <-ch: + m.workLk.Lock() + defer m.workLk.Unlock() + + res := m.results[wid] + done() + + return res.r, res.err + case <-ctx.Done(): + return nil, xerrors.Errorf("waiting for work result: %w", ctx.Err()) + } +} + +func (m *Manager) waitSimpleCall(ctx context.Context) func(callID storiface.CallID, err error) (interface{}, error) { + return func(callID storiface.CallID, err error) (interface{}, error) { + if err != nil { + return nil, err + } + + return m.waitCall(ctx, callID) + } +} + +func (m *Manager) waitCall(ctx 
context.Context, callID storiface.CallID) (interface{}, error) { + m.workLk.Lock() + _, ok := m.callToWork[callID] + if ok { + m.workLk.Unlock() + return nil, xerrors.Errorf("can't wait for calls related to work") + } + + ch, ok := m.callRes[callID] + if !ok { + ch = make(chan result, 1) + m.callRes[callID] = ch + } + m.workLk.Unlock() + + defer func() { + m.workLk.Lock() + defer m.workLk.Unlock() + + delete(m.callRes, callID) + }() + + select { + case res := <-ch: + return res.r, res.err + case <-ctx.Done(): + return nil, xerrors.Errorf("waiting for call result: %w", ctx.Err()) + } +} + +func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr string) error { + var err error + if serr != "" { + err = errors.New(serr) + } + + res := result{ + r: r, + err: err, + } + + m.sched.workTracker.onDone(callID) + + m.workLk.Lock() + defer m.workLk.Unlock() + + wid, ok := m.callToWork[callID] + if !ok { + rch, ok := m.callRes[callID] + if !ok { + rch = make(chan result, 1) + m.callRes[callID] = rch + } + + if len(rch) > 0 { + return xerrors.Errorf("callRes channel already has a response") + } + if cap(rch) == 0 { + return xerrors.Errorf("expected rch to be buffered") + } + + rch <- res + return nil + } + + _, ok = m.results[wid] + if ok { + return xerrors.Errorf("result for call %v already reported", wid) + } + + m.results[wid] = res + + _, found := m.waitRes[wid] + if found { + close(m.waitRes[wid]) + delete(m.waitRes, wid) + } + + return nil +} diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index ee704cb5a..f69d62b17 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -9,18 +9,24 @@ import ( "os" "path/filepath" "strings" + "sync" + "sync/atomic" "testing" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/google/uuid" - logging "github.com/ipfs/go-log" - "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) func init() { @@ -80,9 +86,8 @@ func (t *testStorage) Stat(path string) (fsutil.FsStat, error) { var _ stores.LocalStorage = &testStorage{} -func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *stores.Remote, *stores.Index) { +func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Manager, *stores.Local, *stores.Remote, *stores.Index, func()) { st := newTestStorage(t) - defer st.cleanup() si := stores.NewIndex() cfg := &ffiwrapper.Config{ @@ -92,7 +97,7 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st lstor, err := stores.NewLocal(ctx, st, si, nil) require.NoError(t, err) - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, spt: cfg.SealProofType}, cfg) require.NoError(t, err) stor := stores.NewRemote(lstor, si, nil, 6000) @@ -109,18 +114,27 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st sched: 
newScheduler(cfg.SealProofType), Prover: prover, + + work: statestore.New(ds), + callToWork: map[storiface.CallID]WorkID{}, + callRes: map[storiface.CallID]chan result{}, + results: map[WorkID]result{}, + waitRes: map[WorkID]chan struct{}{}, } + m.setupWorkTracker() + go m.sched.runSched() - return m, lstor, stor, si + return m, lstor, stor, si, st.cleanup } func TestSimple(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) ctx := context.Background() - m, lstor, _, _ := newTestMgr(ctx, t) + m, lstor, _, _, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore()) + defer cleanup() localTasks := []sealtasks.TaskType{ sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, @@ -129,7 +143,44 @@ func TestSimple(t *testing.T) { err := m.AddWorker(ctx, newTestWorker(WorkerConfig{ SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, - }, lstor)) + }, lstor, m)) + require.NoError(t, err) + + sid := abi.SectorID{Miner: 1000, Number: 1} + + pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), pi.Size) + + piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:])) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), piz.Size) + + pieces := []abi.PieceInfo{pi, piz} + + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + require.NoError(t, err) +} + +func TestRedoPC1(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx := context.Background() + m, lstor, _, _, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore()) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + tw := newTestWorker(WorkerConfig{ + SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, + TaskTypes: localTasks, + }, lstor, m) + + err := m.AddWorker(ctx, tw) require.NoError(t, err) sid := abi.SectorID{Miner: 1000, Number: 1} @@ -149,4 +200,254 @@ func TestSimple(t *testing.T) { _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) require.NoError(t, err) + // tell mock ffi that we expect PC1 again + require.NoError(t, tw.mockSeal.ForceState(sid, 0)) // sectorPacking + + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + require.NoError(t, err) + + require.Equal(t, 2, tw.pc1s) +} + +// Manager restarts in the middle of a task, restarts it, it completes +func TestRestartManager(t *testing.T) { + test := func(returnBeforeCall bool) func(*testing.T) { + return func(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, _, _, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + tw := newTestWorker(WorkerConfig{ + SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, + TaskTypes: localTasks, + }, lstor, m) + + err := m.AddWorker(ctx, tw) + require.NoError(t, err) + + sid := abi.SectorID{Miner: 1000, Number: 1} + + pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), pi.Size) + + piz, err := m.AddPiece(ctx, sid, 
nil, 1016, bytes.NewReader(make([]byte, 1016)[:])) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), piz.Size) + + pieces := []abi.PieceInfo{pi, piz} + + ticket := abi.SealRandomness{0, 9, 9, 9, 9, 9, 9, 9} + + tw.pc1lk.Lock() + tw.pc1wait = &sync.WaitGroup{} + tw.pc1wait.Add(1) + + var cwg sync.WaitGroup + cwg.Add(1) + + var perr error + go func() { + defer cwg.Done() + _, perr = m.SealPreCommit1(ctx, sid, ticket, pieces) + }() + + tw.pc1wait.Wait() + + require.NoError(t, m.Close(ctx)) + tw.ret = nil + + cwg.Wait() + require.Error(t, perr) + + m, _, _, _, cleanup2 := newTestMgr(ctx, t, ds) + defer cleanup2() + + tw.ret = m // simulate jsonrpc auto-reconnect + err = m.AddWorker(ctx, tw) + require.NoError(t, err) + + if returnBeforeCall { + tw.pc1lk.Unlock() + time.Sleep(100 * time.Millisecond) + + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + } else { + done := make(chan struct{}) + go func() { + defer close(done) + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + }() + + time.Sleep(100 * time.Millisecond) + tw.pc1lk.Unlock() + <-done + } + + require.NoError(t, err) + + require.Equal(t, 1, tw.pc1s) + + ws := m.WorkerJobs() + require.Empty(t, ws) + } + } + + t.Run("callThenReturn", test(false)) + t.Run("returnThenCall", test(true)) +} + +// Worker restarts in the middle of a task, task fails after restart +func TestRestartWorker(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + SealProof: 0, + TaskTypes: localTasks, + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := abi.SectorID{Miner: 1000, Number: 1} + + apDone := make(chan struct{}) + + go func() { + defer close(apDone) + + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + + // kill the worker + <-arch + require.NoError(t, w.Close()) + + for { + if len(m.WorkerStats()) == 0 { + break + } + + time.Sleep(time.Millisecond * 3) + } + + // restart the worker + w = newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + SealProof: 0, + TaskTypes: localTasks, + }, stor, lstor, idx, m, statestore.New(wds)) + + err = m.AddWorker(ctx, w) + require.NoError(t, err) + + <-apDone + + time.Sleep(12 * time.Millisecond) + uf, err := w.ct.unfinished() + require.NoError(t, err) + require.Empty(t, uf) +} + +func TestReenableWorker(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + stores.HeartbeatInterval = 5 * time.Millisecond + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { 
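TestRestartWorker and TestReenableWorker drive their scenarios with a fake executor (testExec and apres are helpers added elsewhere in this PR and not shown in this hunk) whose calls park on a channel, so the test can kill or disable the worker while a task is provably in flight. A generic, self-contained sketch of that blocking-mock pattern, under the assumption that this is all the helper needs to do:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// blockingExec illustrates the pattern: the fake implementation hands the test
// a reply channel and then parks, so the test decides exactly when (or whether)
// the call completes. That makes "worker dies mid-task" deterministic.
type blockingExec struct {
	calls chan chan error
}

func (e *blockingExec) AddPiece() error {
	reply := make(chan error)
	e.calls <- reply // tell the test the call is in flight
	return <-reply   // park until the test responds
}

func main() {
	exec := &blockingExec{calls: make(chan chan error)}

	var wg sync.WaitGroup
	var got error
	wg.Add(1)
	go func() {
		defer wg.Done()
		got = exec.AddPiece()
	}()

	reply := <-exec.calls                   // the task is now provably running
	reply <- errors.New("worker restarted") // simulate the failure a restart produces
	wg.Wait()
	fmt.Println(got)
}
```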
+ return &testExec{apch: arch}, nil + }, WorkerConfig{ + SealProof: 0, + TaskTypes: localTasks, + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 100) + + i, _ := m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) + + // disable + atomic.StoreInt64(&w.testDisable, 1) + + for i := 0; i < 100; i++ { + if !m.WorkerStats()[w.session].Enabled { + break + } + + time.Sleep(time.Millisecond * 3) + } + require.False(t, m.WorkerStats()[w.session].Enabled) + + i, _ = m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 0) + + // reenable + atomic.StoreInt64(&w.testDisable, 0) + + for i := 0; i < 100; i++ { + if m.WorkerStats()[w.session].Enabled { + break + } + + time.Sleep(time.Millisecond * 3) + } + require.True(t, m.WorkerStats()[w.session].Enabled) + + for i := 0; i < 100; i++ { + info, _ := m.sched.Info(ctx) + if len(info.(SchedDiagInfo).OpenWindows) != 0 { + break + } + + time.Sleep(time.Millisecond * 3) + } + + i, _ = m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) } diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 64207e66d..b3de99ce5 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -9,7 +9,7 @@ import ( "math/rand" "sync" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" @@ -127,6 +127,19 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { return id, nil } +func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error { + mgr.lk.Lock() + ss, ok := mgr.sectors[sid] + mgr.lk.Unlock() + if !ok { + return xerrors.Errorf("no sector with id %d in storage", sid) + } + + ss.state = st + + return nil +} + func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { mgr.lk.Lock() ss, ok := mgr.sectors[sid] @@ -230,8 +243,8 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket } func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { - var out [32]byte - for i := range out { + var out [1920]byte + for i := range out[:len(phase1Out)] { out[i] = phase1Out[i] ^ byte(sid.Number&0xff) } @@ -280,12 +293,12 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { - si := make([]proof.SectorInfo, 0, len(sectorInfo)) +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { 
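A note on the mock proof change earlier in this hunk: SealCommit2 now always returns a 1920-byte proof in which only the leading bytes are derived from the phase-1 output (XORed with the sector number), and the updated VerifySeal further below checks only the first 32 bytes. A standalone sketch of that fake-proof scheme, using only what the mock itself shows:

```go
package main

import (
	"bytes"
	"fmt"
)

// mockCommit2 mirrors the mock SealCommit2: a fixed 1920-byte proof whose
// leading bytes are the phase-1 output XORed with the low byte of the sector
// number; the remainder stays zero and is ignored by the mock verifier.
func mockCommit2(phase1Out []byte, sectorNumber uint64) []byte {
	var out [1920]byte
	for i := range out[:len(phase1Out)] {
		out[i] = phase1Out[i] ^ byte(sectorNumber&0xff)
	}
	return out[:]
}

func main() {
	p1 := []byte("fake phase1 output")
	proof := mockCommit2(p1, 1)
	fmt.Println(len(proof))                                               // 1920
	fmt.Println(bytes.Equal(proof[len(p1):], make([]byte, 1920-len(p1)))) // true: tail stays zero
}
```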
+ si := make([]proof2.SectorInfo, 0, len(sectorInfo)) var skipped []abi.SectorID var err error @@ -313,7 +326,7 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) []byte { hasher := sha256.New() _, _ = hasher.Write(randomness) for _, info := range sectorInfo { @@ -326,13 +339,13 @@ func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRan } -func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof { +func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof2.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof.PoStProof{ + return []proof2.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -392,7 +405,7 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { return nil } -func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, ids []abi.SectorID) ([]abi.SectorID, error) { +func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []abi.SectorID) ([]abi.SectorID, error) { var bad []abi.SectorID for _, sid := range ids { @@ -406,12 +419,57 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealP return bad, nil } -func (m mockVerif) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { - if len(svi.Proof) != 32 { // Real ones are longer, but this should be fine +func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { + 
panic("not supported") +} + +func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { + if len(svi.Proof) != 1920 { return false, nil } - for i, b := range svi.Proof { + // only the first 32 bytes, the rest are 0. + for i, b := range svi.Proof[:32] { if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { return false, nil } @@ -420,11 +478,11 @@ func (m mockVerif) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } diff --git a/extern/sector-storage/roprov.go b/extern/sector-storage/roprov.go index 2b009c63b..7f051b549 100644 --- a/extern/sector-storage/roprov.go +++ b/extern/sector-storage/roprov.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type readonlyProvider struct { @@ -16,25 +17,30 @@ type readonlyProvider struct { spt abi.RegisteredSealProof } -func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { - if allocate != stores.FTNone { - return stores.SectorPaths{}, nil, xerrors.New("read-only storage") +func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { + if allocate != storiface.FTNone { + return storiface.SectorPaths{}, nil, xerrors.New("read-only storage") + } + + ssize, err := l.spt.SectorSize() + if err != nil { + return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err) } ctx, cancel := context.WithCancel(ctx) // use TryLock to avoid blocking - locked, err := l.index.StorageTryLock(ctx, id, existing, stores.FTNone) + locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone) if err != nil { cancel() - return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) + return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) } if !locked { cancel() - return stores.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") + return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") } - p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing, stores.AcquireMove) + p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove) return p, cancel, err } diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 8b8ef6d46..549a16a96 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -2,12 +2,12 @@ package sectorstorage import ( "context" - "fmt" "math/rand" "sort" "sync" "time" + "github.com/google/uuid" "golang.org/x/xerrors" 
"github.com/filecoin-project/go-state-types/abi" @@ -53,22 +53,20 @@ type WorkerSelector interface { type scheduler struct { spt abi.RegisteredSealProof - workersLk sync.RWMutex - nextWorker WorkerID - workers map[WorkerID]*workerHandle - - newWorkers chan *workerHandle - - watchClosing chan WorkerID - workerClosing chan WorkerID + workersLk sync.RWMutex + workers map[WorkerID]*workerHandle schedule chan *workerRequest windowRequests chan *schedWindowRequest + workerChange chan struct{} // worker added / changed/freed resources + workerDisable chan workerDisableReq // owned by the sh.runSched goroutine schedQueue *requestQueue openWindows []*schedWindowRequest + workTracker *workTracker + info chan func(interface{}) closing chan struct{} @@ -77,7 +75,7 @@ type scheduler struct { } type workerHandle struct { - w Worker + workerRpc Worker info storiface.WorkerInfo @@ -89,8 +87,7 @@ type workerHandle struct { wndLk sync.Mutex activeWindows []*schedWindow - // stats / tracking - wt *workTracker + enabled bool // for sync manager goroutine closing cleanupStarted bool @@ -109,6 +106,12 @@ type schedWindow struct { todo []*workerRequest } +type workerDisableReq struct { + activeWindows []*schedWindow + wid WorkerID + done func() +} + type activeResources struct { memUsedMin uint64 memUsedMax uint64 @@ -144,19 +147,20 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { return &scheduler{ spt: spt, - nextWorker: 0, - workers: map[WorkerID]*workerHandle{}, - - newWorkers: make(chan *workerHandle), - - watchClosing: make(chan WorkerID), - workerClosing: make(chan WorkerID), + workers: map[WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), windowRequests: make(chan *schedWindowRequest, 20), + workerChange: make(chan struct{}, 20), + workerDisable: make(chan workerDisableReq), schedQueue: &requestQueue{}, + workTracker: &workTracker{ + done: map[storiface.CallID]struct{}{}, + running: map[storiface.CallID]trackedWork{}, + }, + info: make(chan func(interface{})), closing: make(chan struct{}), @@ -214,27 +218,25 @@ type SchedDiagRequestInfo struct { type SchedDiagInfo struct { Requests []SchedDiagRequestInfo - OpenWindows []WorkerID + OpenWindows []string } func (sh *scheduler) runSched() { defer close(sh.closed) - go sh.runWorkerWatcher() - iw := time.After(InitWait) var initialised bool for { var doSched bool + var toDisable []workerDisableReq select { - case w := <-sh.newWorkers: - sh.newWorker(w) - - case wid := <-sh.workerClosing: - sh.dropWorker(wid) - + case <-sh.workerChange: + doSched = true + case dreq := <-sh.workerDisable: + toDisable = append(toDisable, dreq) + doSched = true case req := <-sh.schedule: sh.schedQueue.Push(req) doSched = true @@ -263,6 +265,9 @@ func (sh *scheduler) runSched() { loop: for { select { + case <-sh.workerChange: + case dreq := <-sh.workerDisable: + toDisable = append(toDisable, dreq) case req := <-sh.schedule: sh.schedQueue.Push(req) if sh.testSync != nil { @@ -275,6 +280,28 @@ func (sh *scheduler) runSched() { } } + for _, req := range toDisable { + for _, window := range req.activeWindows { + for _, request := range window.todo { + sh.schedQueue.Push(request) + } + } + + openWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) + for _, window := range sh.openWindows { + if window.worker != req.wid { + openWindows = append(openWindows, window) + } + } + sh.openWindows = openWindows + + sh.workersLk.Lock() + sh.workers[req.wid].enabled = false + sh.workersLk.Unlock() + + req.done() + } + sh.trySched() } @@ -294,8 +321,11 @@ func 
(sh *scheduler) diag() SchedDiagInfo { }) } + sh.workersLk.RLock() + defer sh.workersLk.RUnlock() + for _, window := range sh.openWindows { - out.OpenWindows = append(out.OpenWindows, window.worker) + out.OpenWindows = append(out.OpenWindows, uuid.UUID(window.worker).String()) } return out @@ -318,13 +348,14 @@ func (sh *scheduler) trySched() { */ + sh.workersLk.RLock() + defer sh.workersLk.RUnlock() + windows := make([]schedWindow, len(sh.openWindows)) acceptableWindows := make([][]int, sh.schedQueue.Len()) log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) - sh.workersLk.RLock() - defer sh.workersLk.RUnlock() if len(sh.openWindows) == 0 { // nothing to schedule on return @@ -353,11 +384,16 @@ func (sh *scheduler) trySched() { for wnd, windowRequest := range sh.openWindows { worker, ok := sh.workers[windowRequest.worker] if !ok { - log.Errorf("worker referenced by windowRequest not found (worker: %d)", windowRequest.worker) + log.Errorf("worker referenced by windowRequest not found (worker: %s)", windowRequest.worker) // TODO: How to move forward here? continue } + if !worker.enabled { + log.Debugw("skipping disabled worker", "worker", windowRequest.worker) + continue + } + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) { continue @@ -495,320 +531,6 @@ func (sh *scheduler) trySched() { sh.openWindows = newOpenWindows } -func (sh *scheduler) runWorker(wid WorkerID) { - var ready sync.WaitGroup - ready.Add(1) - defer ready.Wait() - - go func() { - sh.workersLk.RLock() - worker, found := sh.workers[wid] - sh.workersLk.RUnlock() - - ready.Done() - - if !found { - panic(fmt.Sprintf("worker %d not found", wid)) - } - - defer close(worker.closedMgr) - - scheduledWindows := make(chan *schedWindow, SchedWindows) - taskDone := make(chan struct{}, 1) - windowsRequested := 0 - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - workerClosing, err := worker.w.Closing(ctx) - if err != nil { - return - } - - defer func() { - log.Warnw("Worker closing", "workerid", wid) - - // TODO: close / return all queued tasks - }() - - for { - // ask for more windows if we need them - for ; windowsRequested < SchedWindows; windowsRequested++ { - select { - case sh.windowRequests <- &schedWindowRequest{ - worker: wid, - done: scheduledWindows, - }: - case <-sh.closing: - return - case <-workerClosing: - return - case <-worker.closingMgr: - return - } - } - - select { - case w := <-scheduledWindows: - worker.wndLk.Lock() - worker.activeWindows = append(worker.activeWindows, w) - worker.wndLk.Unlock() - case <-taskDone: - log.Debugw("task done", "workerid", wid) - case <-sh.closing: - return - case <-workerClosing: - return - case <-worker.closingMgr: - return - } - - sh.workersLk.RLock() - worker.wndLk.Lock() - - windowsRequested -= sh.workerCompactWindows(worker, wid) - - assignLoop: - // process windows in order - for len(worker.activeWindows) > 0 { - firstWindow := worker.activeWindows[0] - - // process tasks within a window, preferring tasks at lower indexes - for len(firstWindow.todo) > 0 { - tidx := -1 - - worker.lk.Lock() - for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][sh.spt] - if worker.preparing.canHandleRequest(needRes, wid, "startPreparing", worker.info.Resources) { - tidx = t - break - } - } - worker.lk.Unlock() - - if tidx == -1 { - break assignLoop - } - - todo := firstWindow.todo[tidx] - - log.Debugf("assign worker 
sector %d", todo.sector.Number) - err := sh.assignWorker(taskDone, wid, worker, todo) - - if err != nil { - log.Error("assignWorker error: %+v", err) - go todo.respond(xerrors.Errorf("assignWorker error: %w", err)) - } - - // Note: we're not freeing window.allocated resources here very much on purpose - copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:]) - firstWindow.todo[len(firstWindow.todo)-1] = nil - firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1] - } - - copy(worker.activeWindows, worker.activeWindows[1:]) - worker.activeWindows[len(worker.activeWindows)-1] = nil - worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1] - - windowsRequested-- - } - - worker.wndLk.Unlock() - sh.workersLk.RUnlock() - } - }() -} - -func (sh *scheduler) workerCompactWindows(worker *workerHandle, wid WorkerID) int { - // move tasks from older windows to newer windows if older windows - // still can fit them - if len(worker.activeWindows) > 1 { - for wi, window := range worker.activeWindows[1:] { - lower := worker.activeWindows[wi] - var moved []int - - for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][sh.spt] - if !lower.allocated.canHandleRequest(needRes, wid, "compactWindows", worker.info.Resources) { - continue - } - - moved = append(moved, ti) - lower.todo = append(lower.todo, todo) - lower.allocated.add(worker.info.Resources, needRes) - window.allocated.free(worker.info.Resources, needRes) - } - - if len(moved) > 0 { - newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved)) - for i, t := range window.todo { - if len(moved) > 0 && moved[0] == i { - moved = moved[1:] - continue - } - - newTodo = append(newTodo, t) - } - window.todo = newTodo - } - } - } - - var compacted int - var newWindows []*schedWindow - - for _, window := range worker.activeWindows { - if len(window.todo) == 0 { - compacted++ - continue - } - - newWindows = append(newWindows, window) - } - - worker.activeWindows = newWindows - - return compacted -} - -func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error { - needRes := ResourceTable[req.taskType][sh.spt] - - w.lk.Lock() - w.preparing.add(w.info.Resources, needRes) - w.lk.Unlock() - - go func() { - err := req.prepare(req.ctx, w.wt.worker(w.w)) - sh.workersLk.Lock() - - if err != nil { - w.lk.Lock() - w.preparing.free(w.info.Resources, needRes) - w.lk.Unlock() - sh.workersLk.Unlock() - - select { - case taskDone <- struct{}{}: - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - return - } - - err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { - w.lk.Lock() - w.preparing.free(w.info.Resources, needRes) - w.lk.Unlock() - sh.workersLk.Unlock() - defer sh.workersLk.Lock() // we MUST return locked from this function - - select { - case taskDone <- struct{}{}: - case <-sh.closing: - } - - err = req.work(req.ctx, w.wt.worker(w.w)) - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond") - case <-sh.closing: - log.Warnf("scheduler closed while sending response") - } - - return nil - }) - - 
sh.workersLk.Unlock() - - // This error should always be nil, since nothing is setting it, but just to be safe: - if err != nil { - log.Errorf("error executing worker (withResources): %+v", err) - } - }() - - return nil -} - -func (sh *scheduler) newWorker(w *workerHandle) { - w.closedMgr = make(chan struct{}) - w.closingMgr = make(chan struct{}) - - sh.workersLk.Lock() - - id := sh.nextWorker - sh.workers[id] = w - sh.nextWorker++ - - sh.workersLk.Unlock() - - sh.runWorker(id) - - select { - case sh.watchClosing <- id: - case <-sh.closing: - return - } -} - -func (sh *scheduler) dropWorker(wid WorkerID) { - sh.workersLk.Lock() - defer sh.workersLk.Unlock() - - w := sh.workers[wid] - - sh.workerCleanup(wid, w) - - delete(sh.workers, wid) -} - -func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { - select { - case <-w.closingMgr: - default: - close(w.closingMgr) - } - - sh.workersLk.Unlock() - select { - case <-w.closedMgr: - case <-time.After(time.Second): - log.Errorf("timeout closing worker manager goroutine %d", wid) - } - sh.workersLk.Lock() - - if !w.cleanupStarted { - w.cleanupStarted = true - - newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) - for _, window := range sh.openWindows { - if window.worker != wid { - newWindows = append(newWindows, window) - } - } - sh.openWindows = newWindows - - log.Debugf("dropWorker %d", wid) - - go func() { - if err := w.w.Close(); err != nil { - log.Warnf("closing worker %d: %+v", err) - } - }() - } -} - func (sh *scheduler) schedClose() { sh.workersLk.Lock() defer sh.workersLk.Unlock() diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index d6dae577b..10fe29aae 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -27,7 +27,9 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource } func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - a.gpuUsed = r.CanGPU + if r.CanGPU { + a.gpuUsed = true + } a.cpuUse += r.Threads(wr.CPUs) a.memUsedMin += r.MinMemory a.memUsedMax += r.MaxMemory diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 579a6d913..849896ff6 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/google/uuid" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" @@ -43,58 +44,58 @@ type schedTestWorker struct { paths []stores.StoragePath closed bool - closing chan struct{} + session uuid.UUID } -func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { +func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { +func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (s 
*schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { +func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error { +func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error { +func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { +func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error { +func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { +func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { +func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { panic("implement me") } @@ -121,15 +122,15 @@ func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error }, nil } -func (s *schedTestWorker) Closing(ctx 
context.Context) (<-chan struct{}, error) { - return s.closing, nil +func (s *schedTestWorker) Session(context.Context) (uuid.UUID, error) { + return s.session, nil } func (s *schedTestWorker) Close() error { if !s.closed { log.Info("close schedTestWorker") s.closed = true - close(s.closing) + s.session = uuid.UUID{} } return nil } @@ -142,7 +143,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str taskTypes: taskTypes, paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, - closing: make(chan struct{}), + session: uuid.New(), } for _, path := range w.paths { @@ -160,18 +161,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str require.NoError(t, err) } - info, err := w.Info(context.TODO()) - require.NoError(t, err) - - sched.newWorkers <- &workerHandle{ - w: w, - wt: &workTracker{ - running: map[uint64]storiface.WorkerJob{}, - }, - info: info, - preparing: &activeResources{}, - active: &activeResources{}, - } + require.NoError(t, sched.runWorker(context.TODO(), w)) } func TestSchedStartStop(t *testing.T) { @@ -215,7 +205,7 @@ func TestSched(t *testing.T) { done := make(chan struct{}) rm.done[taskName] = done - sel := newAllocSelector(index, stores.FTCache, stores.PathSealing) + sel := newAllocSelector(index, storiface.FTCache, storiface.PathSealing) rm.wg.Add(1) go func() { @@ -435,7 +425,7 @@ func TestSched(t *testing.T) { type line struct { storiface.WorkerJob - wid uint64 + wid uuid.UUID } lines := make([]line, 0) @@ -539,8 +529,8 @@ func BenchmarkTrySched(b *testing.B) { b.StopTimer() sched := newScheduler(spt) - sched.workers[0] = &workerHandle{ - w: nil, + sched.workers[WorkerID{}] = &workerHandle{ + workerRpc: nil, info: storiface.WorkerInfo{ Hostname: "t", Resources: decentWorkerResources, @@ -551,7 +541,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < windows; i++ { sched.openWindows = append(sched.openWindows, &schedWindowRequest{ - worker: 0, + worker: WorkerID{}, done: make(chan *schedWindow, 1000), }) } @@ -601,8 +591,13 @@ func TestWindowCompact(t *testing.T) { wh.activeWindows = append(wh.activeWindows, window) } - n := sh.workerCompactWindows(wh, 0) - require.Equal(t, len(start)-len(expect), n) + sw := schedWorker{ + sched: &sh, + worker: wh, + } + + sw.workerCompactWindows() + require.Equal(t, len(start)-len(expect), -sw.windowsRequested) for wi, tasks := range expect { var expectRes activeResources diff --git a/extern/sector-storage/sched_watch.go b/extern/sector-storage/sched_watch.go deleted file mode 100644 index 2dd9875d7..000000000 --- a/extern/sector-storage/sched_watch.go +++ /dev/null @@ -1,100 +0,0 @@ -package sectorstorage - -import ( - "context" - "reflect" -) - -func (sh *scheduler) runWorkerWatcher() { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - nilch := reflect.ValueOf(new(chan struct{})).Elem() - - cases := []reflect.SelectCase{ - { - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(sh.closing), - }, - { - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(sh.watchClosing), - }, - } - - caseToWorker := map[int]WorkerID{} - - for { - n, rv, ok := reflect.Select(cases) - - switch { - case n == 0: // sh.closing - return - case n == 1: // sh.watchClosing - if !ok { - log.Errorf("watchClosing channel closed") - return - } - - wid, ok := rv.Interface().(WorkerID) - if !ok { - panic("got a non-WorkerID message") - } - - sh.workersLk.Lock() - workerClosing, err := sh.workers[wid].w.Closing(ctx) - 
sh.workersLk.Unlock() - if err != nil { - log.Errorf("getting worker closing channel: %+v", err) - select { - case sh.workerClosing <- wid: - case <-sh.closing: - return - } - - continue - } - - toSet := -1 - for i, sc := range cases { - if sc.Chan == nilch { - toSet = i - break - } - } - if toSet == -1 { - toSet = len(cases) - cases = append(cases, reflect.SelectCase{}) - } - - cases[toSet] = reflect.SelectCase{ - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(workerClosing), - } - - caseToWorker[toSet] = wid - default: - wid, found := caseToWorker[n] - if !found { - log.Errorf("worker ID not found for case %d", n) - continue - } - - delete(caseToWorker, n) - cases[n] = reflect.SelectCase{ - Dir: reflect.SelectRecv, - Chan: nilch, - } - - log.Warnf("worker %d dropped", wid) - // send in a goroutine to avoid a deadlock between workerClosing / watchClosing - go func() { - select { - case sh.workerClosing <- wid: - case <-sh.closing: - return - } - }() - } - } -} diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go new file mode 100644 index 000000000..e56e9056d --- /dev/null +++ b/extern/sector-storage/sched_worker.go @@ -0,0 +1,484 @@ +package sectorstorage + +import ( + "context" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/extern/sector-storage/stores" +) + +type schedWorker struct { + sched *scheduler + worker *workerHandle + + wid WorkerID + + heartbeatTimer *time.Ticker + scheduledWindows chan *schedWindow + taskDone chan struct{} + + windowsRequested int +} + +// context only used for startup +func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { + info, err := w.Info(ctx) + if err != nil { + return xerrors.Errorf("getting worker info: %w", err) + } + + sessID, err := w.Session(ctx) + if err != nil { + return xerrors.Errorf("getting worker session: %w", err) + } + if sessID == ClosedWorkerID { + return xerrors.Errorf("worker already closed") + } + + worker := &workerHandle{ + workerRpc: w, + info: info, + + preparing: &activeResources{}, + active: &activeResources{}, + enabled: true, + + closingMgr: make(chan struct{}), + closedMgr: make(chan struct{}), + } + + wid := WorkerID(sessID) + + sh.workersLk.Lock() + _, exist := sh.workers[wid] + if exist { + log.Warnw("duplicated worker added", "id", wid) + + // this is ok, we're already handling this worker in a different goroutine + return nil + } + + sh.workers[wid] = worker + sh.workersLk.Unlock() + + sw := &schedWorker{ + sched: sh, + worker: worker, + + wid: wid, + + heartbeatTimer: time.NewTicker(stores.HeartbeatInterval), + scheduledWindows: make(chan *schedWindow, SchedWindows), + taskDone: make(chan struct{}, 1), + + windowsRequested: 0, + } + + go sw.handleWorker() + + return nil +} + +func (sw *schedWorker) handleWorker() { + worker, sched := sw.worker, sw.sched + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + defer close(worker.closedMgr) + + defer func() { + log.Warnw("Worker closing", "workerid", sw.wid) + + if err := sw.disable(ctx); err != nil { + log.Warnw("failed to disable worker", "worker", sw.wid, "error", err) + } + + sched.workersLk.Lock() + delete(sched.workers, sw.wid) + sched.workersLk.Unlock() + }() + + defer sw.heartbeatTimer.Stop() + + for { + { + sched.workersLk.Lock() + enabled := worker.enabled + sched.workersLk.Unlock() + + // ask for more windows if we need them (non-blocking) + if enabled { + if !sw.requestWindows() { + return // graceful shutdown + } + } + } + + // wait for more windows to 
come in, or for tasks to get finished (blocking) + for { + // ping the worker and check session + if !sw.checkSession(ctx) { + return // invalid session / exiting + } + + // session looks good + { + sched.workersLk.Lock() + enabled := worker.enabled + worker.enabled = true + sched.workersLk.Unlock() + + if !enabled { + // go send window requests + break + } + } + + // wait for more tasks to be assigned by the main scheduler or for the worker + // to finish precessing a task + update, ok := sw.waitForUpdates() + if !ok { + return + } + if update { + break + } + } + + // process assigned windows (non-blocking) + sched.workersLk.RLock() + worker.wndLk.Lock() + + sw.workerCompactWindows() + + // send tasks to the worker + sw.processAssignedWindows() + + worker.wndLk.Unlock() + sched.workersLk.RUnlock() + } +} + +func (sw *schedWorker) disable(ctx context.Context) error { + done := make(chan struct{}) + + // request cleanup in the main scheduler goroutine + select { + case sw.sched.workerDisable <- workerDisableReq{ + activeWindows: sw.worker.activeWindows, + wid: sw.wid, + done: func() { + close(done) + }, + }: + case <-ctx.Done(): + return ctx.Err() + case <-sw.sched.closing: + return nil + } + + // wait for cleanup to complete + select { + case <-done: + case <-ctx.Done(): + return ctx.Err() + case <-sw.sched.closing: + return nil + } + + sw.worker.activeWindows = sw.worker.activeWindows[:0] + sw.windowsRequested = 0 + return nil +} + +func (sw *schedWorker) checkSession(ctx context.Context) bool { + for { + sctx, scancel := context.WithTimeout(ctx, stores.HeartbeatInterval/2) + curSes, err := sw.worker.workerRpc.Session(sctx) + scancel() + if err != nil { + // Likely temporary error + + log.Warnw("failed to check worker session", "error", err) + + if err := sw.disable(ctx); err != nil { + log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err) + } + + select { + case <-sw.heartbeatTimer.C: + continue + case w := <-sw.scheduledWindows: + // was in flight when initially disabled, return + sw.worker.wndLk.Lock() + sw.worker.activeWindows = append(sw.worker.activeWindows, w) + sw.worker.wndLk.Unlock() + + if err := sw.disable(ctx); err != nil { + log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err) + } + case <-sw.sched.closing: + return false + case <-sw.worker.closingMgr: + return false + } + continue + } + + if WorkerID(curSes) != sw.wid { + if curSes != ClosedWorkerID { + // worker restarted + log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes) + } + + return false + } + + return true + } +} + +func (sw *schedWorker) requestWindows() bool { + for ; sw.windowsRequested < SchedWindows; sw.windowsRequested++ { + select { + case sw.sched.windowRequests <- &schedWindowRequest{ + worker: sw.wid, + done: sw.scheduledWindows, + }: + case <-sw.sched.closing: + return false + case <-sw.worker.closingMgr: + return false + } + } + return true +} + +func (sw *schedWorker) waitForUpdates() (update bool, ok bool) { + select { + case <-sw.heartbeatTimer.C: + return false, true + case w := <-sw.scheduledWindows: + sw.worker.wndLk.Lock() + sw.worker.activeWindows = append(sw.worker.activeWindows, w) + sw.worker.wndLk.Unlock() + return true, true + case <-sw.taskDone: + log.Debugw("task done", "workerid", sw.wid) + return true, true + case <-sw.sched.closing: + case <-sw.worker.closingMgr: + } + + return false, false +} + +func (sw *schedWorker) workerCompactWindows() { + worker := sw.worker + + // 
move tasks from older windows to newer windows if older windows + // still can fit them + if len(worker.activeWindows) > 1 { + for wi, window := range worker.activeWindows[1:] { + lower := worker.activeWindows[wi] + var moved []int + + for ti, todo := range window.todo { + needRes := ResourceTable[todo.taskType][sw.sched.spt] + if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) { + continue + } + + moved = append(moved, ti) + lower.todo = append(lower.todo, todo) + lower.allocated.add(worker.info.Resources, needRes) + window.allocated.free(worker.info.Resources, needRes) + } + + if len(moved) > 0 { + newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved)) + for i, t := range window.todo { + if len(moved) > 0 && moved[0] == i { + moved = moved[1:] + continue + } + + newTodo = append(newTodo, t) + } + window.todo = newTodo + } + } + } + + var compacted int + var newWindows []*schedWindow + + for _, window := range worker.activeWindows { + if len(window.todo) == 0 { + compacted++ + continue + } + + newWindows = append(newWindows, window) + } + + worker.activeWindows = newWindows + sw.windowsRequested -= compacted +} + +func (sw *schedWorker) processAssignedWindows() { + worker := sw.worker + +assignLoop: + // process windows in order + for len(worker.activeWindows) > 0 { + firstWindow := worker.activeWindows[0] + + // process tasks within a window, preferring tasks at lower indexes + for len(firstWindow.todo) > 0 { + tidx := -1 + + worker.lk.Lock() + for t, todo := range firstWindow.todo { + needRes := ResourceTable[todo.taskType][sw.sched.spt] + if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) { + tidx = t + break + } + } + worker.lk.Unlock() + + if tidx == -1 { + break assignLoop + } + + todo := firstWindow.todo[tidx] + + log.Debugf("assign worker sector %d", todo.sector.Number) + err := sw.startProcessingTask(sw.taskDone, todo) + + if err != nil { + log.Error("startProcessingTask error: %+v", err) + go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err)) + } + + // Note: we're not freeing window.allocated resources here very much on purpose + copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:]) + firstWindow.todo[len(firstWindow.todo)-1] = nil + firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1] + } + + copy(worker.activeWindows, worker.activeWindows[1:]) + worker.activeWindows[len(worker.activeWindows)-1] = nil + worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1] + + sw.windowsRequested-- + } +} + +func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error { + w, sh := sw.worker, sw.sched + + needRes := ResourceTable[req.taskType][sh.spt] + + w.lk.Lock() + w.preparing.add(w.info.Resources, needRes) + w.lk.Unlock() + + go func() { + // first run the prepare step (e.g. 
fetching sector data from other worker) + err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + sh.workersLk.Lock() + + if err != nil { + w.lk.Lock() + w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() + sh.workersLk.Unlock() + + select { + case taskDone <- struct{}{}: + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + return + } + + // wait (if needed) for resources in the 'active' window + err = w.active.withResources(sw.wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.lk.Lock() + w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() + sh.workersLk.Unlock() + defer sh.workersLk.Lock() // we MUST return locked from this function + + select { + case taskDone <- struct{}{}: + case <-sh.closing: + } + + // Do the work! + err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond") + case <-sh.closing: + log.Warnf("scheduler closed while sending response") + } + + return nil + }) + + sh.workersLk.Unlock() + + // This error should always be nil, since nothing is setting it, but just to be safe: + if err != nil { + log.Errorf("error executing worker (withResources): %+v", err) + } + }() + + return nil +} + +func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { + select { + case <-w.closingMgr: + default: + close(w.closingMgr) + } + + sh.workersLk.Unlock() + select { + case <-w.closedMgr: + case <-time.After(time.Second): + log.Errorf("timeout closing worker manager goroutine %d", wid) + } + sh.workersLk.Lock() + + if !w.cleanupStarted { + w.cleanupStarted = true + + newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) + for _, window := range sh.openWindows { + if window.worker != wid { + newWindows = append(newWindows, window) + } + } + sh.openWindows = newWindows + + log.Debugf("worker %d dropped", wid) + } +} diff --git a/extern/sector-storage/selector_alloc.go b/extern/sector-storage/selector_alloc.go index b891383fb..14724fbe8 100644 --- a/extern/sector-storage/selector_alloc.go +++ b/extern/sector-storage/selector_alloc.go @@ -9,15 +9,16 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type allocSelector struct { index stores.SectorIndex - alloc stores.SectorFileType - ptype stores.PathType + alloc storiface.SectorFileType + ptype storiface.PathType } -func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector { +func newAllocSelector(index stores.SectorIndex, alloc storiface.SectorFileType, ptype storiface.PathType) *allocSelector { return &allocSelector{ index: index, alloc: alloc, @@ -26,7 +27,7 @@ func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, pty } func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { - tasks, err := whnd.w.TaskTypes(ctx) + tasks, err := whnd.workerRpc.TaskTypes(ctx) if err != nil { 
return false, xerrors.Errorf("getting supported worker task types: %w", err) } @@ -34,7 +35,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi return false, nil } - paths, err := whnd.w.Paths(ctx) + paths, err := whnd.workerRpc.Paths(ctx) if err != nil { return false, xerrors.Errorf("getting worker paths: %w", err) } @@ -44,7 +45,12 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi have[path.ID] = struct{}{} } - best, err := s.index.StorageBestAlloc(ctx, s.alloc, spt, s.ptype) + ssize, err := spt.SectorSize() + if err != nil { + return false, xerrors.Errorf("getting sector size: %w", err) + } + + best, err := s.index.StorageBestAlloc(ctx, s.alloc, ssize, s.ptype) if err != nil { return false, xerrors.Errorf("finding best alloc storage: %w", err) } diff --git a/extern/sector-storage/selector_existing.go b/extern/sector-storage/selector_existing.go index fb161f085..0e3a41aeb 100644 --- a/extern/sector-storage/selector_existing.go +++ b/extern/sector-storage/selector_existing.go @@ -9,16 +9,17 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type existingSelector struct { index stores.SectorIndex sector abi.SectorID - alloc stores.SectorFileType + alloc storiface.SectorFileType allowFetch bool } -func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector { +func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc storiface.SectorFileType, allowFetch bool) *existingSelector { return &existingSelector{ index: index, sector: sector, @@ -28,7 +29,7 @@ func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc st } func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { - tasks, err := whnd.w.TaskTypes(ctx) + tasks, err := whnd.workerRpc.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) } @@ -36,7 +37,7 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt return false, nil } - paths, err := whnd.w.Paths(ctx) + paths, err := whnd.workerRpc.Paths(ctx) if err != nil { return false, xerrors.Errorf("getting worker paths: %w", err) } @@ -46,7 +47,12 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt have[path.ID] = struct{}{} } - best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, spt, s.allowFetch) + ssize, err := spt.SectorSize() + if err != nil { + return false, xerrors.Errorf("getting sector size: %w", err) + } + + best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, ssize, s.allowFetch) if err != nil { return false, xerrors.Errorf("finding best storage: %w", err) } diff --git a/extern/sector-storage/selector_task.go b/extern/sector-storage/selector_task.go index 807b53103..ffed40d68 100644 --- a/extern/sector-storage/selector_task.go +++ b/extern/sector-storage/selector_task.go @@ -20,7 +20,7 @@ func newTaskSelector() *taskSelector { } func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { - tasks, err := whnd.w.TaskTypes(ctx) + tasks, err := whnd.workerRpc.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task 
types: %w", err) } @@ -30,11 +30,11 @@ func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi. } func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *workerHandle) (bool, error) { - atasks, err := a.w.TaskTypes(ctx) + atasks, err := a.workerRpc.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) } - btasks, err := b.w.TaskTypes(ctx) + btasks, err := b.workerRpc.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) } diff --git a/extern/sector-storage/stats.go b/extern/sector-storage/stats.go index 7f95e3bc3..bae60b426 100644 --- a/extern/sector-storage/stats.go +++ b/extern/sector-storage/stats.go @@ -1,18 +1,24 @@ package sectorstorage import ( + "time" + + "github.com/google/uuid" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { +func (m *Manager) WorkerStats() map[uuid.UUID]storiface.WorkerStats { m.sched.workersLk.RLock() defer m.sched.workersLk.RUnlock() - out := map[uint64]storiface.WorkerStats{} + out := map[uuid.UUID]storiface.WorkerStats{} for id, handle := range m.sched.workers { - out[uint64(id)] = storiface.WorkerStats{ - Info: handle.info, + out[uuid.UUID(id)] = storiface.WorkerStats{ + Info: handle.info, + Enabled: handle.enabled, + MemUsedMin: handle.active.memUsedMin, MemUsedMax: handle.active.memUsedMax, GpuUsed: handle.active.gpuUsed, @@ -23,20 +29,23 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { return out } -func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob { - m.sched.workersLk.RLock() - defer m.sched.workersLk.RUnlock() +func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { + out := map[uuid.UUID][]storiface.WorkerJob{} + calls := map[storiface.CallID]struct{}{} - out := map[uint64][]storiface.WorkerJob{} + for _, t := range m.sched.workTracker.Running() { + out[uuid.UUID(t.worker)] = append(out[uuid.UUID(t.worker)], t.job) + calls[t.job.ID] = struct{}{} + } + + m.sched.workersLk.RLock() for id, handle := range m.sched.workers { - out[uint64(id)] = handle.wt.Running() - handle.wndLk.Lock() for wi, window := range handle.activeWindows { for _, request := range window.todo { - out[uint64(id)] = append(out[uint64(id)], storiface.WorkerJob{ - ID: 0, + out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{ + ID: storiface.UndefCall, Sector: request.sector, Task: request.taskType, RunWait: wi + 1, @@ -47,5 +56,25 @@ func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob { handle.wndLk.Unlock() } + m.sched.workersLk.RUnlock() + + m.workLk.Lock() + defer m.workLk.Unlock() + + for id, work := range m.callToWork { + _, found := calls[id] + if found { + continue + } + + out[uuid.UUID{}] = append(out[uuid.UUID{}], storiface.WorkerJob{ + ID: id, + Sector: id.Sector, + Task: work.Method, + RunWait: -1, + Start: time.Time{}, + }) + } + return out } diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 97af6e769..2237bd407 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -10,6 +10,7 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" ) @@ -55,16 +56,16 @@ func (handler *FetchHandler) 
remoteGetSector(w http.ResponseWriter, r *http.Requ log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) - id, err := ParseSectorID(vars["id"]) + id, err := storiface.ParseSectorID(vars["id"]) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } ft, err := ftFromString(vars["type"]) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } @@ -72,16 +73,16 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ // The caller has a lock on this sector already, no need to get one here // passing 0 spt because we don't allocate anything - paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove) + paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } // TODO: reserve local storage here - path := PathByType(paths, ft) + path := storiface.PathByType(paths, ft) if path == "" { log.Error("acquired path was empty") w.WriteHeader(500) @@ -90,7 +91,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ stat, err := os.Stat(path) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } @@ -104,14 +105,14 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ w.Header().Set("Content-Type", "application/octet-stream") } if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } w.WriteHeader(200) if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small - log.Error("%+v", err) + log.Errorf("%+v", err) return } } @@ -120,35 +121,35 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R log.Infof("SERVE DELETE %s", r.URL) vars := mux.Vars(r) - id, err := ParseSectorID(vars["id"]) + id, err := storiface.ParseSectorID(vars["id"]) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } ft, err := ftFromString(vars["type"]) if err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } if err := handler.Remove(r.Context(), id, ft, false); err != nil { - log.Error("%+v", err) + log.Errorf("%+v", err) w.WriteHeader(500) return } } -func ftFromString(t string) (SectorFileType, error) { +func ftFromString(t string) (storiface.SectorFileType, error) { switch t { - case FTUnsealed.String(): - return FTUnsealed, nil - case FTSealed.String(): - return FTSealed, nil - case FTCache.String(): - return FTCache, nil + case storiface.FTUnsealed.String(): + return storiface.FTUnsealed, nil + case storiface.FTSealed.String(): + return storiface.FTSealed, nil + case storiface.FTCache.String(): + return storiface.FTCache, nil default: return 0, xerrors.Errorf("unknown sector file type: '%s'", t) } diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index e2bd7e4ee..acd799ab7 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -10,10 +10,11 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) var 
HeartbeatInterval = 10 * time.Second @@ -53,20 +54,20 @@ type SectorIndex interface { // part of storage-miner api StorageInfo(context.Context, ID) (StorageInfo, error) StorageReportHealth(context.Context, ID, HealthReport) error - StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType, primary bool) error - StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType) error - StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]SectorStorageInfo, error) + StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error + StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error + StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error) - StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error) + StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error) // atomically acquire locks on all sector file types. close ctx to unlock - StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error - StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) + StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error + StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) } type Decl struct { abi.SectorID - SectorFileType + storiface.SectorFileType } type declMeta struct { @@ -104,10 +105,10 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { i.lk.RLock() defer i.lk.RUnlock() - byID := map[ID]map[abi.SectorID]SectorFileType{} + byID := map[ID]map[abi.SectorID]storiface.SectorFileType{} for id := range i.stores { - byID[id] = map[abi.SectorID]SectorFileType{} + byID[id] = map[abi.SectorID]storiface.SectorFileType{} } for decl, ids := range i.sectors { for _, id := range ids { @@ -180,12 +181,12 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep return nil } -func (i *Index) StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType, primary bool) error { +func (i *Index) StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { i.lk.Lock() defer i.lk.Unlock() loop: - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&ft == 0 { continue } @@ -212,11 +213,11 @@ loop: return nil } -func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType) error { +func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error { i.lk.Lock() defer i.lk.Unlock() - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&ft == 0 { continue } @@ -246,14 +247,14 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto return nil } -func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, spt 
abi.RegisteredSealProof, allowFetch bool) ([]SectorStorageInfo, error) { +func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() storageIDs := map[ID]uint64{} isprimary := map[ID]bool{} - for _, pathType := range PathTypes { + for _, pathType := range storiface.PathTypes { if ft&pathType == 0 { continue } @@ -280,7 +281,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector return nil, xerrors.Errorf("failed to parse url: %w", err) } - rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s)) + rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s)) urls[k] = rl.String() } @@ -297,7 +298,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector } if allowFetch { - spaceReq, err := ft.SealSpaceUse(spt) + spaceReq, err := ft.SealSpaceUse(ssize) if err != nil { return nil, xerrors.Errorf("estimating required space: %w", err) } @@ -333,7 +334,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector return nil, xerrors.Errorf("failed to parse url: %w", err) } - rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s)) + rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s)) urls[k] = rl.String() } @@ -365,22 +366,22 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { return *si.info, nil } -func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error) { +func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() var candidates []storageEntry - spaceReq, err := allocate.SealSpaceUse(spt) + spaceReq, err := allocate.SealSpaceUse(ssize) if err != nil { return nil, xerrors.Errorf("estimating required space: %w", err) } for _, p := range i.stores { - if (pathType == PathSealing) && !p.info.CanSeal { + if (pathType == storiface.PathSealing) && !p.info.CanSeal { continue } - if (pathType == PathStorage) && !p.info.CanStore { + if (pathType == storiface.PathStorage) && !p.info.CanStore { continue } @@ -421,7 +422,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s return out, nil } -func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) { +func (i *Index) FindSector(id abi.SectorID, typ storiface.SectorFileType) ([]ID, error) { i.lk.RLock() defer i.lk.RUnlock() diff --git a/extern/sector-storage/stores/index_locks.go b/extern/sector-storage/stores/index_locks.go index 32c963a41..3a5ff940e 100644 --- a/extern/sector-storage/stores/index_locks.go +++ b/extern/sector-storage/stores/index_locks.go @@ -7,18 +7,20 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type sectorLock struct { cond *ctxCond - r [FileTypes]uint - w SectorFileType + r [storiface.FileTypes]uint + w storiface.SectorFileType refs uint // access with indexLocks.lk } -func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool { +func (l *sectorLock) canLock(read storiface.SectorFileType, write storiface.SectorFileType) bool { for i, b := range write.All() { if b && l.r[i] > 0 { return false @@ -29,7 +31,7 @@ func (l *sectorLock) canLock(read 
SectorFileType, write SectorFileType) bool { return l.w&read == 0 && l.w&write == 0 } -func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { +func (l *sectorLock) tryLock(read storiface.SectorFileType, write storiface.SectorFileType) bool { if !l.canLock(read, write) { return false } @@ -45,16 +47,16 @@ func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { return true } -type lockFn func(l *sectorLock, ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) +type lockFn func(l *sectorLock, ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) -func (l *sectorLock) tryLockSafe(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) { +func (l *sectorLock) tryLockSafe(ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { l.cond.L.Lock() defer l.cond.L.Unlock() return l.tryLock(read, write), nil } -func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) { +func (l *sectorLock) lock(ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { l.cond.L.Lock() defer l.cond.L.Unlock() @@ -67,7 +69,7 @@ func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write Sector return true, nil } -func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) { +func (l *sectorLock) unlock(read storiface.SectorFileType, write storiface.SectorFileType) { l.cond.L.Lock() defer l.cond.L.Unlock() @@ -88,12 +90,12 @@ type indexLocks struct { locks map[abi.SectorID]*sectorLock } -func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) { +func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { if read|write == 0 { return false, nil } - if read|write > (1< (1< %s: %w", sid, t, meta.ID, err) - } - } + if err := st.declareSectors(ctx, p, meta.ID, meta.CanStore); err != nil { + return err } st.paths[meta.ID] = out @@ -236,6 +210,83 @@ func (st *Local) open(ctx context.Context) error { return nil } +func (st *Local) Redeclare(ctx context.Context) error { + st.localLk.Lock() + defer st.localLk.Unlock() + + for id, p := range st.paths { + mb, err := ioutil.ReadFile(filepath.Join(p.local, MetaFile)) + if err != nil { + return xerrors.Errorf("reading storage metadata for %s: %w", p.local, err) + } + + var meta LocalStorageMeta + if err := json.Unmarshal(mb, &meta); err != nil { + return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p.local, err) + } + + fst, err := p.stat(st.localStorage) + if err != nil { + return err + } + + if id != meta.ID { + log.Errorf("storage path ID changed: %s; %s -> %s", p.local, id, meta.ID) + continue + } + + err = st.index.StorageAttach(ctx, StorageInfo{ + ID: id, + URLs: st.urls, + Weight: meta.Weight, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, + }, fst) + if err != nil { + return xerrors.Errorf("redeclaring storage in index: %w", err) + } + + if err := st.declareSectors(ctx, p.local, meta.ID, meta.CanStore); err != nil { + return xerrors.Errorf("redeclaring sectors: %w", err) + } + } + + return nil +} + +func (st *Local) declareSectors(ctx context.Context, p string, id ID, primary bool) error { + for _, t := range storiface.PathTypes { + ents, err := ioutil.ReadDir(filepath.Join(p, 
t.String())) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil { // nolint + return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err) + } + + continue + } + return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err) + } + + for _, ent := range ents { + if ent.Name() == FetchTempSubdir { + continue + } + + sid, err := storiface.ParseSectorID(ent.Name()) + if err != nil { + return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) + } + + if err := st.index.StorageDeclareSector(ctx, id, sid, t, primary); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, id, err) + } + } + } + + return nil +} + func (st *Local) reportHealth(ctx context.Context) { // randomize interval by ~10% interval := (HeartbeatInterval*100_000 + time.Duration(rand.Int63n(10_000))) / 100_000 @@ -263,18 +314,13 @@ func (st *Local) reportHealth(ctx context.Context) { for id, report := range toReport { if err := st.index.StorageReportHealth(ctx, id, report); err != nil { - log.Warnf("error reporting storage health for %s: %+v", id, report) + log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err) } } } } -func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, ft SectorFileType, storageIDs SectorPaths, overheadTab map[SectorFileType]int) (func(), error) { - ssize, err := spt.SectorSize() - if err != nil { - return nil, xerrors.Errorf("getting sector size: %w", err) - } - +func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { st.localLk.Lock() done := func() {} @@ -284,12 +330,12 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register deferredDone() }() - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&ft == 0 { continue } - id := ID(PathByType(storageIDs, fileType)) + id := ID(storiface.PathByType(storageIDs, fileType)) p, ok := st.paths[id] if !ok { @@ -301,7 +347,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register return nil, xerrors.Errorf("getting local storage stat: %w", err) } - overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen + overhead := int64(overheadTab[fileType]) * int64(ssize) / storiface.FSOverheadDen if stat.Available < overhead { return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available) @@ -324,23 +370,23 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register return done, nil } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { - return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } 
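The existing|allocate guard just above relies on a small bit-mask identity: for storiface.SectorFileType flags, a|b equals a^b exactly when the two masks share no bits, so the inequality is true only when a caller asks AcquireSector to both find and allocate the same file type. A minimal self-contained sketch of that check follows (an illustration assuming FTUnsealed, FTSealed and FTCache are the usual single-bit flags; it is a sketch, not part of the change itself):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	existing := storiface.FTSealed | storiface.FTCache
	allocate := storiface.FTUnsealed

	// Disjoint masks: OR and XOR agree, so the guard lets the request through.
	fmt.Println(existing|allocate != existing^allocate) // false

	// Overlapping masks: FTCache is requested as both existing and allocate,
	// so OR and XOR differ and AcquireSector returns an error.
	allocate |= storiface.FTCache
	fmt.Println(existing|allocate != existing^allocate) // true
}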
st.localLk.RLock() defer st.localLk.RUnlock() - var out SectorPaths - var storageIDs SectorPaths + var out storiface.SectorPaths + var storageIDs storiface.SectorPaths - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&existing == 0 { continue } - si, err := st.index.StorageFindSector(ctx, sid, fileType, spt, false) + si, err := st.index.StorageFindSector(ctx, sid, fileType, ssize, false) if err != nil { log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err) continue @@ -357,22 +403,22 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re } spath := p.sectorPath(sid, fileType) - SetPathByType(&out, fileType, spath) - SetPathByType(&storageIDs, fileType, string(info.ID)) + storiface.SetPathByType(&out, fileType, spath) + storiface.SetPathByType(&storageIDs, fileType, string(info.ID)) existing ^= fileType break } } - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&allocate == 0 { continue } - sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, pathType) + sis, err := st.index.StorageBestAlloc(ctx, fileType, ssize, pathType) if err != nil { - return SectorPaths{}, SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err) + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err) } var best string @@ -388,11 +434,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re continue } - if (pathType == PathSealing) && !si.CanSeal { + if (pathType == storiface.PathSealing) && !si.CanSeal { continue } - if (pathType == PathStorage) && !si.CanStore { + if (pathType == storiface.PathStorage) && !si.CanStore { continue } @@ -404,11 +450,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re } if best == "" { - return SectorPaths{}, SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector") + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector") } - SetPathByType(&out, fileType, best) - SetPathByType(&storageIDs, fileType, string(bestID)) + storiface.SetPathByType(&out, fileType, best) + storiface.SetPathByType(&storageIDs, fileType, string(bestID)) allocate ^= fileType } @@ -442,7 +488,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { return out, nil } -func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error { +func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -465,7 +511,7 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileTyp return nil } -func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { +func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -501,7 +547,7 @@ func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorF return nil } -func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorFileType, storage ID) error { +func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, storage ID) error { p, ok := 
st.paths[storage] if !ok { return nil @@ -525,28 +571,28 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { - dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, PathStorage, AcquireMove) +func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { + dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&types == 0 { continue } - sst, err := st.index.StorageInfo(ctx, ID(PathByType(srcIds, fileType))) + sst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(srcIds, fileType))) if err != nil { return xerrors.Errorf("failed to get source storage info: %w", err) } - dst, err := st.index.StorageInfo(ctx, ID(PathByType(destIds, fileType))) + dst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(destIds, fileType))) if err != nil { return xerrors.Errorf("failed to get source storage info: %w", err) } @@ -563,17 +609,17 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore) - if err := st.index.StorageDropSector(ctx, ID(PathByType(srcIds, fileType)), s, fileType); err != nil { + if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil { return xerrors.Errorf("dropping source sector from index: %w", err) } - if err := move(PathByType(src, fileType), PathByType(dest, fileType)); err != nil { + if err := move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil { // TODO: attempt some recovery (check if src is still there, re-declare) return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) } - if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType, true); err != nil { - return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err) + if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err) } } diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index b9d241b5f..37dde910d 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -38,7 +38,7 @@ type Remote struct { fetching map[abi.SectorID]chan struct{} } -func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error { +func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { // TODO: do this on remotes too // (not that we really need to do that 
since it's always called by the // worker which pulled the copy) @@ -58,9 +58,9 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { - return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } for { @@ -79,7 +79,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi case <-c: continue case <-ctx.Done(): - return SectorPaths{}, SectorPaths{}, ctx.Err() + return storiface.SectorPaths{}, storiface.SectorPaths{}, ctx.Err() } } @@ -90,64 +90,64 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi r.fetchLk.Unlock() }() - paths, stores, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, pathType, op) + paths, stores, err := r.local.AcquireSector(ctx, s, ssize, existing, allocate, pathType, op) if err != nil { - return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } - var toFetch SectorFileType - for _, fileType := range PathTypes { + var toFetch storiface.SectorFileType + for _, fileType := range storiface.PathTypes { if fileType&existing == 0 { continue } - if PathByType(paths, fileType) == "" { + if storiface.PathByType(paths, fileType) == "" { toFetch |= fileType } } - apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, toFetch, pathType, op) + apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op) if err != nil { - return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) } - odt := FSOverheadSeal - if pathType == PathStorage { - odt = FsOverheadFinalized + odt := storiface.FSOverheadSeal + if pathType == storiface.PathStorage { + odt = storiface.FsOverheadFinalized } - releaseStorage, err := r.local.Reserve(ctx, s, spt, toFetch, ids, odt) + releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt) if err != nil { - return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) } defer releaseStorage() - for _, fileType := range PathTypes { + for _, fileType := range storiface.PathTypes { if fileType&existing == 0 { continue } - if PathByType(paths, fileType) != "" { + if storiface.PathByType(paths, fileType) != "" { continue } - dest := PathByType(apaths, fileType) - storageID := PathByType(ids, fileType) + dest := storiface.PathByType(apaths, fileType) + storageID := storiface.PathByType(ids, fileType) url, err := r.acquireFromRemote(ctx, s, fileType, dest) if err != nil { - return SectorPaths{}, SectorPaths{}, err + 
return storiface.SectorPaths{}, storiface.SectorPaths{}, err } - SetPathByType(&paths, fileType, dest) - SetPathByType(&stores, fileType, storageID) + storiface.SetPathByType(&paths, fileType, dest) + storiface.SetPathByType(&stores, fileType, storageID) - if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil { + if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) continue } - if op == AcquireMove { + if op == storiface.AcquireMove { if err := r.deleteFromRemote(ctx, url); err != nil { log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) } @@ -169,7 +169,7 @@ func tempFetchDest(spath string, create bool) (string, error) { return filepath.Join(tempdir, b), nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType storiface.SectorFileType, dest string) (string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, 0, false) if err != nil { return "", err @@ -281,17 +281,17 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { +func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { // Make sure we have the data local - _, _, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) + _, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } - return r.local.MoveStorage(ctx, s, spt, types) + return r.local.MoveStorage(ctx, s, ssize, types) } -func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error { +func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } diff --git a/extern/sector-storage/storiface/cbor_gen.go b/extern/sector-storage/storiface/cbor_gen.go new file mode 100644 index 000000000..0efbc125b --- /dev/null +++ b/extern/sector-storage/storiface/cbor_gen.go @@ -0,0 +1,142 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
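The generated file that follows adds CBOR (de)serialization for CallID, which lets in-flight call handles be stored and recovered. A rough round-trip sketch using the generated MarshalCBOR/UnmarshalCBOR methods and the CallID type introduced later in this change (illustration only, with an arbitrary example sector ID):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/google/uuid"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	c := storiface.CallID{
		Sector: abi.SectorID{Miner: 1000, Number: 1},
		ID:     uuid.New(),
	}

	// Encode the call handle to CBOR bytes.
	var buf bytes.Buffer
	if err := c.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// Decode it back; both fields are comparable, so the round-trip
	// can be checked directly.
	var out storiface.CallID
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out == c) // true
}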
+ +package storiface + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +func (t *CallID) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Sector (abi.SectorID) (struct) + if len("Sector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sector\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sector"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sector")); err != nil { + return err + } + + if err := t.Sector.MarshalCBOR(w); err != nil { + return err + } + + // t.ID (uuid.UUID) (array) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if len(t.ID) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.ID was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ID))); err != nil { + return err + } + + if _, err := w.Write(t.ID[:]); err != nil { + return err + } + return nil +} + +func (t *CallID) UnmarshalCBOR(r io.Reader) error { + *t = CallID{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("CallID: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Sector (abi.SectorID) (struct) + case "Sector": + + { + + if err := t.Sector.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sector: %w", err) + } + + } + // t.ID (uuid.UUID) (array) + case "ID": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.ID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra != 16 { + return fmt.Errorf("expected array to have 16 elements") + } + + t.ID = [16]uint8{} + + if _, err := io.ReadFull(br, t.ID[:]); err != nil { + return err + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} diff --git a/extern/sector-storage/stores/filetype.go b/extern/sector-storage/storiface/filetype.go similarity index 91% rename from extern/sector-storage/stores/filetype.go rename to extern/sector-storage/storiface/filetype.go index 90cc1d160..3f7c7455e 100644 --- a/extern/sector-storage/stores/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -1,4 +1,4 @@ -package stores +package storiface import ( "fmt" @@ -16,6 +16,8 @@ const ( FileTypes = iota ) +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} + const ( FTNone SectorFileType = 0 ) @@ -53,12 +55,7 @@ func (t SectorFileType) Has(singleType SectorFileType) bool { return t&singleType == singleType } -func (t SectorFileType) 
SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error) { - ssize, err := spt.SectorSize() - if err != nil { - return 0, xerrors.Errorf("getting sector size: %w", err) - } - +func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { var need uint64 for _, pathType := range PathTypes { if !t.Has(pathType) { diff --git a/extern/sector-storage/storiface/storage.go b/extern/sector-storage/storiface/storage.go new file mode 100644 index 000000000..e836002d5 --- /dev/null +++ b/extern/sector-storage/storiface/storage.go @@ -0,0 +1,15 @@ +package storiface + +type PathType string + +const ( + PathStorage PathType = "storage" + PathSealing PathType = "sealing" +) + +type AcquireMode string + +const ( + AcquireMove AcquireMode = "move" + AcquireCopy AcquireMode = "copy" +) diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index 25e3175bd..bbc9ca554 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -1,9 +1,17 @@ package storiface import ( + "context" + "fmt" + "io" "time" + "github.com/google/uuid" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) @@ -24,7 +32,8 @@ type WorkerResources struct { } type WorkerStats struct { - Info WorkerInfo + Info WorkerInfo + Enabled bool MemUsedMin uint64 MemUsedMax uint64 @@ -33,10 +42,51 @@ type WorkerStats struct { } type WorkerJob struct { - ID uint64 + ID CallID Sector abi.SectorID Task sealtasks.TaskType - RunWait int // 0 - running, 1+ - assigned + RunWait int // -1 - ret-wait, 0 - running, 1+ - assigned Start time.Time } + +type CallID struct { + Sector abi.SectorID + ID uuid.UUID +} + +func (c CallID) String() string { + return fmt.Sprintf("%d-%d-%s", c.Sector.Miner, c.Sector.Number, c.ID) +} + +var _ fmt.Stringer = &CallID{} + +var UndefCall CallID + +type WorkerCalls interface { + AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error) + SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) + SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error) + SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) + SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error) + FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error) + ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error) + MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error) + UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) + ReadPiece(context.Context, io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error) + Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error) +} + +type WorkerReturn interface { + ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error + ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err 
string) error + ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err string) error + ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err string) error + ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err string) error + ReturnFinalizeSector(ctx context.Context, callID CallID, err string) error + ReturnReleaseUnsealed(ctx context.Context, callID CallID, err string) error + ReturnMoveStorage(ctx context.Context, callID CallID, err string) error + ReturnUnsealPiece(ctx context.Context, callID CallID, err string) error + ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err string) error + ReturnFetch(ctx context.Context, callID CallID, err string) error +} diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go new file mode 100644 index 000000000..0c8a240a3 --- /dev/null +++ b/extern/sector-storage/teststorage_test.go @@ -0,0 +1,81 @@ +package sectorstorage + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type apres struct { + pi abi.PieceInfo + err error +} + +type testExec struct { + apch chan chan apres +} + +func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { + panic("implement me") +} + +func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) { + panic("implement me") +} + +func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + panic("implement me") +} + +func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + panic("implement me") +} + +func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + panic("implement me") +} + +func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + panic("implement me") +} + +func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + panic("implement me") +} + +func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + panic("implement me") +} + +func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + resp := make(chan apres) + t.apch <- resp + ar := <-resp + return ar.pi, ar.err +} + +func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset 
storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { + panic("implement me") +} + +func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + panic("implement me") +} + +var _ ffiwrapper.Storage = &testExec{} diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index 8f27401f0..d04afb0cc 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -2,12 +2,11 @@ package sectorstorage import ( "context" - "io" - - "github.com/ipfs/go-cid" + "sync" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/google/uuid" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -18,11 +17,20 @@ import ( type testWorker struct { acceptTasks map[sealtasks.TaskType]struct{} lstor *stores.Local + ret storiface.WorkerReturn mockSeal *mock.SectorMgr + + pc1s int + pc1lk sync.Mutex + pc1wait *sync.WaitGroup + + session uuid.UUID + + Worker } -func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker { +func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker { ssize, err := wcfg.SealProof.SectorSize() if err != nil { panic(err) @@ -36,61 +44,58 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker { return &testWorker{ acceptTasks: acceptTasks, lstor: lstor, + ret: ret, mockSeal: mock.NewMockSectorMgr(ssize, nil), + + session: uuid.New(), } } -func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { - return t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces) +func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) { + ci := storiface.CallID{ + Sector: sector, + ID: uuid.New(), + } + + go work(ci) + + return ci, nil } -func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error { - panic("implement me") +func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) + if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil { + log.Error(err) + } + }) } -func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { - panic("implement me") +func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + t.pc1s++ + + if t.pc1wait != nil { + t.pc1wait.Done() + } + + t.pc1lk.Lock() + defer t.pc1lk.Unlock() + + p1o, err := t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces) + if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, errstr(err)); err != nil { + log.Error(err) + } + }) } -func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size 
abi.UnpaddedPieceSize) (bool, error) { - panic("implement me") -} - -func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { - return t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) -} - -func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { - panic("implement me") -} - -func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - panic("implement me") -} - -func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { - panic("implement me") -} - -func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { - panic("implement me") -} - -func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - panic("implement me") -} - -func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) error { - panic("implement me") -} - -func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error { - panic("implement me") -} - -func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { - return nil +func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil { + log.Error(err) + } + }) } func (t *testWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { @@ -116,8 +121,8 @@ func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { }, nil } -func (t *testWorker) Closing(ctx context.Context) (<-chan struct{}, error) { - return ctx.Done(), nil +func (t *testWorker) Session(context.Context) (uuid.UUID, error) { + return t.session, nil } func (t *testWorker) Close() error { diff --git a/extern/sector-storage/work_tracker.go b/extern/sector-storage/work_tracker.go deleted file mode 100644 index 5dc12802c..000000000 --- a/extern/sector-storage/work_tracker.go +++ /dev/null @@ -1,129 +0,0 @@ -package sectorstorage - -import ( - "context" - "io" - "sync" - "time" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" -) - -type workTracker struct { - lk sync.Mutex - - ctr uint64 - running map[uint64]storiface.WorkerJob - - // TODO: done, aggregate stats, queue stats, scheduler feedback -} - -func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() { - wt.lk.Lock() - defer wt.lk.Unlock() - - id := wt.ctr - wt.ctr++ - - wt.running[id] = storiface.WorkerJob{ - ID: id, - Sector: sid, - Task: task, - Start: time.Now(), - } - - return func() { - wt.lk.Lock() - defer wt.lk.Unlock() - - delete(wt.running, id) - } -} - -func (wt 
*workTracker) worker(w Worker) Worker { - return &trackedWorker{ - Worker: w, - tracker: wt, - } -} - -func (wt *workTracker) Running() []storiface.WorkerJob { - wt.lk.Lock() - defer wt.lk.Unlock() - - out := make([]storiface.WorkerJob, 0, len(wt.running)) - for _, job := range wt.running { - out = append(out, job) - } - - return out -} - -type trackedWorker struct { - Worker - - tracker *workTracker -} - -func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { - defer t.tracker.track(sector, sealtasks.TTPreCommit1)() - - return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces) -} - -func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { - defer t.tracker.track(sector, sealtasks.TTPreCommit2)() - - return t.Worker.SealPreCommit2(ctx, sector, pc1o) -} - -func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - defer t.tracker.track(sector, sealtasks.TTCommit1)() - - return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) -} - -func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { - defer t.tracker.track(sector, sealtasks.TTCommit2)() - - return t.Worker.SealCommit2(ctx, sector, c1o) -} - -func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { - defer t.tracker.track(sector, sealtasks.TTFinalize)() - - return t.Worker.FinalizeSector(ctx, sector, keepUnsealed) -} - -func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { - defer t.tracker.track(sector, sealtasks.TTAddPiece)() - - return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) -} - -func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { - defer t.tracker.track(s, sealtasks.TTFetch)() - - return t.Worker.Fetch(ctx, s, ft, ptype, am) -} - -func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { - defer t.tracker.track(id, sealtasks.TTUnseal)() - - return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) -} - -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { - defer t.tracker.track(id, sealtasks.TTReadUnsealed)() - - return t.Worker.ReadPiece(ctx, writer, id, index, size) -} - -var _ Worker = &trackedWorker{} diff --git a/extern/sector-storage/worker_calltracker.go b/extern/sector-storage/worker_calltracker.go new file mode 100644 index 000000000..6f03c72cc --- /dev/null +++ b/extern/sector-storage/worker_calltracker.go @@ -0,0 +1,117 @@ +package sectorstorage + +import ( + "fmt" + "io" + + "github.com/filecoin-project/go-statestore" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type workerCallTracker struct { + st 
*statestore.StateStore // by CallID +} + +type CallState uint64 + +const ( + CallStarted CallState = iota + CallDone + // returned -> remove +) + +type Call struct { + ID storiface.CallID + RetType ReturnType + + State CallState + + Result *ManyBytes // json bytes +} + +func (wt *workerCallTracker) onStart(ci storiface.CallID, rt ReturnType) error { + return wt.st.Begin(ci, &Call{ + ID: ci, + RetType: rt, + State: CallStarted, + }) +} + +func (wt *workerCallTracker) onDone(ci storiface.CallID, ret []byte) error { + st := wt.st.Get(ci) + return st.Mutate(func(cs *Call) error { + cs.State = CallDone + cs.Result = &ManyBytes{ret} + return nil + }) +} + +func (wt *workerCallTracker) onReturned(ci storiface.CallID) error { + st := wt.st.Get(ci) + return st.End() +} + +func (wt *workerCallTracker) unfinished() ([]Call, error) { + var out []Call + return out, wt.st.List(&out) +} + +// Ideally this would be a tag on the struct field telling cbor-gen to enforce higher max-len +type ManyBytes struct { + b []byte +} + +const many = 100 << 20 + +func (t *ManyBytes) MarshalCBOR(w io.Writer) error { + if t == nil { + t = &ManyBytes{} + } + + if len(t.b) > many { + return xerrors.Errorf("byte array in field t.Result was too long") + } + + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.b))); err != nil { + return err + } + + if _, err := w.Write(t.b[:]); err != nil { + return err + } + return nil +} + +func (t *ManyBytes) UnmarshalCBOR(r io.Reader) error { + *t = ManyBytes{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 9) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > many { + return fmt.Errorf("byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.b = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.b[:]); err != nil { + return err + } + + return nil +} diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go new file mode 100644 index 000000000..ae2b325ca --- /dev/null +++ b/extern/sector-storage/worker_local.go @@ -0,0 +1,556 @@ +package sectorstorage + +import ( + "context" + "encoding/json" + "io" + "os" + "reflect" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/elastic/go-sysinfo" + "github.com/google/uuid" + "github.com/hashicorp/go-multierror" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" + storage2 "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} + +type WorkerConfig struct { + SealProof abi.RegisteredSealProof + TaskTypes []sealtasks.TaskType + NoSwap bool +} + +// used do provide custom proofs impl (mostly used in testing) +type ExecutorFunc func() (ffiwrapper.Storage, error) + +type LocalWorker struct { + scfg *ffiwrapper.Config + storage stores.Store + localStore *stores.Local + sindex stores.SectorIndex + ret storiface.WorkerReturn + executor ExecutorFunc + noSwap bool + + ct 
*workerCallTracker + acceptTasks map[sealtasks.TaskType]struct{} + running sync.WaitGroup + + session uuid.UUID + testDisable int64 + closing chan struct{} +} + +func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { + acceptTasks := map[sealtasks.TaskType]struct{}{} + for _, taskType := range wcfg.TaskTypes { + acceptTasks[taskType] = struct{}{} + } + + w := &LocalWorker{ + scfg: &ffiwrapper.Config{ + SealProofType: wcfg.SealProof, + }, + storage: store, + localStore: local, + sindex: sindex, + ret: ret, + + ct: &workerCallTracker{ + st: cst, + }, + acceptTasks: acceptTasks, + executor: executor, + noSwap: wcfg.NoSwap, + + session: uuid.New(), + closing: make(chan struct{}), + } + + if w.executor == nil { + w.executor = w.ffiExec + } + + unfinished, err := w.ct.unfinished() + if err != nil { + log.Errorf("reading unfinished tasks: %+v", err) + return w + } + + go func() { + for _, call := range unfinished { + err := xerrors.Errorf("worker restarted") + + // TODO: Handle restarting PC1 once support is merged + + if doReturn(context.TODO(), call.RetType, call.ID, ret, nil, err) { + if err := w.ct.onReturned(call.ID); err != nil { + log.Errorf("marking call as returned failed: %s: %+v", call.RetType, err) + } + } + } + }() + + return w +} + +func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { + return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst) +} + +type localWorkerPathProvider struct { + w *LocalWorker + op storiface.AcquireMode +} + +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { + ssize, err := l.w.scfg.SealProofType.SectorSize() + if err != nil { + return storiface.SectorPaths{}, nil, err + } + + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op) + if err != nil { + return storiface.SectorPaths{}, nil, err + } + + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal) + if err != nil { + return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) + } + + log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) + + return paths, func() { + releaseStorage() + + for _, fileType := range pathTypes { + if fileType&allocate == 0 { + continue + } + + sid := storiface.PathByType(storageIDs, fileType) + + if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil { + log.Errorf("declare sector error: %+v", err) + } + } + }, nil +} + +func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { + return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) +} + +type ReturnType string + +// in: func(WorkerReturn, context.Context, CallID, err string) +// in: func(WorkerReturn, context.Context, CallID, ret T, err string) +func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error { + rf := reflect.ValueOf(in) + ft := rf.Type() + withRet := ft.NumIn() == 5 + + return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i 
interface{}, err error) error { + rctx := reflect.ValueOf(ctx) + rwr := reflect.ValueOf(wr) + rerr := reflect.ValueOf(errstr(err)) + rci := reflect.ValueOf(ci) + + var ro []reflect.Value + + if withRet { + ret := reflect.ValueOf(i) + if i == nil { + ret = reflect.Zero(rf.Type().In(3)) + } + + ro = rf.Call([]reflect.Value{rwr, rctx, rci, ret, rerr}) + } else { + ro = rf.Call([]reflect.Value{rwr, rctx, rci, rerr}) + } + + if !ro[0].IsNil() { + return ro[0].Interface().(error) + } + + return nil + } +} + +var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error{ + "AddPiece": rfunc(storiface.WorkerReturn.ReturnAddPiece), + "SealPreCommit1": rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + "SealPreCommit2": rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + "SealCommit1": rfunc(storiface.WorkerReturn.ReturnSealCommit1), + "SealCommit2": rfunc(storiface.WorkerReturn.ReturnSealCommit2), + "FinalizeSector": rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + "ReleaseUnsealed": rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + "MoveStorage": rfunc(storiface.WorkerReturn.ReturnMoveStorage), + "UnsealPiece": rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + "ReadPiece": rfunc(storiface.WorkerReturn.ReturnReadPiece), + "Fetch": rfunc(storiface.WorkerReturn.ReturnFetch), +} + +func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { + ci := storiface.CallID{ + Sector: sector, + ID: uuid.New(), + } + + if err := l.ct.onStart(ci, rt); err != nil { + log.Errorf("tracking call (start): %+v", err) + } + + l.running.Add(1) + + go func() { + defer l.running.Done() + + ctx := &wctx{ + vals: ctx, + closing: l.closing, + } + + res, err := work(ctx, ci) + + if err != nil { + rb, err := json.Marshal(res) + if err != nil { + log.Errorf("tracking call (marshaling results): %+v", err) + } else { + if err := l.ct.onDone(ci, rb); err != nil { + log.Errorf("tracking call (done): %+v", err) + } + } + } + + if doReturn(ctx, rt, ci, l.ret, res, err) { + if err := l.ct.onReturned(ci); err != nil { + log.Errorf("tracking call (done): %+v", err) + } + } + }() + + return ci, nil +} + +// doReturn tries to send the result to manager, returns true if successful +func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr error) bool { + for { + err := returnFunc[rt](ctx, ci, ret, res, rerr) + if err == nil { + break + } + + log.Errorf("return error, will retry in 5s: %s: %+v", rt, err) + select { + case <-time.After(5 * time.Second): + case <-ctx.Done(): + log.Errorf("failed to return results: %s", ctx.Err()) + + // fine to just return, worker is most likely shutting down, and + // we didn't mark the result as returned yet, so we'll try to + // re-submit it on restart + return false + } + } + + return true +} + +func errstr(err error) string { + if err != nil { + return err.Error() + } + + return "" +} + +func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { + sb, err := l.executor() + if err != nil { + return err + } + + return sb.NewSector(ctx, sector) +} + +func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return 
l.asyncCall(ctx, sector, "AddPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.AddPiece(ctx, sector, epcs, sz, r) + }) +} + +func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, "Fetch", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype) + if err == nil { + done() + } + + return nil, err + }) +} + +func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, "SealPreCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + + { + // cleanup previous failed attempts if they exist + if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil { + return nil, xerrors.Errorf("cleaning up sealed data: %w", err) + } + + if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil { + return nil, xerrors.Errorf("cleaning up cache data: %w", err) + } + } + + sb, err := l.executor() + if err != nil { + return nil, err + } + + return sb.SealPreCommit1(ctx, sector, ticket, pieces) + }) +} + +func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "SealPreCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.SealPreCommit2(ctx, sector, phase1Out) + }) +} + +func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "SealCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids) + }) +} + +func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "SealCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.SealCommit2(ctx, sector, phase1Out) + }) +} + +func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "FinalizeSector", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil { + return nil, xerrors.Errorf("finalizing sector: %w", err) + } + + if len(keepUnsealed) == 0 { + if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil { + return nil, xerrors.Errorf("removing unsealed data: %w", err) + } + } + + return nil, err + }) +} + +func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) (storiface.CallID, 
error) { + return storiface.UndefCall, xerrors.Errorf("implement me") +} + +func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { + var err error + + if rerr := l.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) + } + if rerr := l.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) + } + if rerr := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + + return err +} + +func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, "MoveStorage", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + ssize, err := l.scfg.SealProofType.SectorSize() + if err != nil { + return nil, err + } + + return nil, l.storage.MoveStorage(ctx, sector, ssize, types) + }) +} + +func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "UnsealPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { + return nil, xerrors.Errorf("unsealing sector: %w", err) + } + + if err = l.storage.RemoveCopies(ctx, sector, storiface.FTSealed); err != nil { + return nil, xerrors.Errorf("removing source data: %w", err) + } + + if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil { + return nil, xerrors.Errorf("removing source data: %w", err) + } + + return nil, nil + }) +} + +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, "ReadPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ReadPiece(ctx, writer, sector, index, size) + }) +} + +func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { + return l.acceptTasks, nil +} + +func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return l.localStore.Local(ctx) +} + +func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { + hostname, err := os.Hostname() // TODO: allow overriding from config + if err != nil { + panic(err) + } + + gpus, err := ffi.GetGPUDevices() + if err != nil { + log.Errorf("getting gpu devices failed: %+v", err) + } + + h, err := sysinfo.Host() + if err != nil { + return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) + } + + mem, err := h.Memory() + if err != nil { + return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) + } + + memSwap := mem.VirtualTotal + if l.noSwap { + memSwap = 0 + } + + return storiface.WorkerInfo{ + Hostname: hostname, + Resources: storiface.WorkerResources{ + MemPhysical: mem.Total, + MemSwap: memSwap, + MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub 
this process + CPUs: uint64(runtime.NumCPU()), + GPUs: gpus, + }, + }, nil +} + +func (l *LocalWorker) Session(ctx context.Context) (uuid.UUID, error) { + if atomic.LoadInt64(&l.testDisable) == 1 { + return uuid.UUID{}, xerrors.Errorf("disabled") + } + + select { + case <-l.closing: + return ClosedWorkerID, nil + default: + return l.session, nil + } +} + +func (l *LocalWorker) Close() error { + close(l.closing) + return nil +} + +// WaitQuiet blocks as long as there are tasks running +func (l *LocalWorker) WaitQuiet() { + l.running.Wait() +} + +type wctx struct { + vals context.Context + closing chan struct{} +} + +func (w *wctx) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +func (w *wctx) Done() <-chan struct{} { + return w.closing +} + +func (w *wctx) Err() error { + select { + case <-w.closing: + return context.Canceled + default: + return nil + } +} + +func (w *wctx) Value(key interface{}) interface{} { + return w.vals.Value(key) +} + +var _ context.Context = &wctx{} + +var _ Worker = &LocalWorker{} diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go new file mode 100644 index 000000000..4a22fcca7 --- /dev/null +++ b/extern/sector-storage/worker_tracked.go @@ -0,0 +1,138 @@ +package sectorstorage + +import ( + "context" + "io" + "sync" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type trackedWork struct { + job storiface.WorkerJob + worker WorkerID +} + +type workTracker struct { + lk sync.Mutex + + done map[storiface.CallID]struct{} + running map[storiface.CallID]trackedWork + + // TODO: done, aggregate stats, queue stats, scheduler feedback +} + +func (wt *workTracker) onDone(callID storiface.CallID) { + wt.lk.Lock() + defer wt.lk.Unlock() + + _, ok := wt.running[callID] + if !ok { + wt.done[callID] = struct{}{} + return + } + + delete(wt.running, callID) +} + +func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { + return func(callID storiface.CallID, err error) (storiface.CallID, error) { + if err != nil { + return callID, err + } + + wt.lk.Lock() + defer wt.lk.Unlock() + + _, done := wt.done[callID] + if done { + delete(wt.done, callID) + return callID, err + } + + wt.running[callID] = trackedWork{ + job: storiface.WorkerJob{ + ID: callID, + Sector: sid, + Task: task, + Start: time.Now(), + }, + worker: wid, + } + + return callID, err + } +} + +func (wt *workTracker) worker(wid WorkerID, w Worker) Worker { + return &trackedWorker{ + Worker: w, + wid: wid, + + tracker: wt, + } +} + +func (wt *workTracker) Running() []trackedWork { + wt.lk.Lock() + defer wt.lk.Unlock() + + out := make([]trackedWork, 0, len(wt.running)) + for _, job := range wt.running { + out = append(out, job) + } + + return out +} + +type trackedWorker struct { + Worker + wid WorkerID + + tracker *workTracker +} + +func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) +} + +func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, 
error) { + return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) +} + +func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { + return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) +} + +func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { + return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) +} + +func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { + return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) +} + +func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { + return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) +} + +func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) +} + +func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { + return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) +} + +func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { + return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) +} + +var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 1010d31b2..ed7a691ef 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/policy" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "golang.org/x/xerrors" @@ -179,7 +179,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, log.Warn("on-chain sealed CID doesn't match!") } - ok, err := m.verif.VerifySeal(proof0.SealVerifyInfo{ + ok, err := m.verif.VerifySeal(proof2.SealVerifyInfo{ SectorID: m.minerSector(si.SectorNumber), SealedCID: pci.Info.SealedCID, SealProof: spt, diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 0d2e766fd..3a5931c8b 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -45,16 +45,22 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorAddPiece{}, WaitDeals), on(SectorStartPacking{}, Packing), ), - Packing: planOne(on(SectorPacked{}, PreCommit1)), + Packing: planOne(on(SectorPacked{}, GetTicket)), + GetTicket: 
planOne( + on(SectorTicket{}, PreCommit1), + on(SectorCommitFailed{}, CommitFailed), + ), PreCommit1: planOne( on(SectorPreCommit1{}, PreCommit2), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), + on(SectorOldTicket{}, GetTicket), ), PreCommit2: planOne( on(SectorPreCommit2{}, PreCommitting), on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), PreCommitting: planOne( on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), @@ -121,6 +127,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorRetryCommitWait{}, CommitWait), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), + on(SectorTicketExpired{}, Removing), ), FinalizeFailed: planOne( on(SectorRetryFinalize{}, FinalizeSector), @@ -219,6 +226,9 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta *<- Packing <- incoming committed capacity | | | v + | GetTicket + | | ^ + | v | *<- PreCommit1 <--> SealPreCommit1Failed | | ^ ^^ | | *----------++----\ @@ -267,6 +277,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta log.Infof("Waiting for deals %d", state.SectorNumber) case Packing: return m.handlePacking, processed, nil + case GetTicket: + return m.handleGetTicket, processed, nil case PreCommit1: return m.handlePreCommit1, processed, nil case PreCommit2: diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 3e597d761..59f5e77e6 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -101,16 +101,26 @@ func (evt SectorPacked) apply(state *SectorInfo) { } } +type SectorTicket struct { + TicketValue abi.SealRandomness + TicketEpoch abi.ChainEpoch +} + +func (evt SectorTicket) apply(state *SectorInfo) { + state.TicketEpoch = evt.TicketEpoch + state.TicketValue = evt.TicketValue +} + +type SectorOldTicket struct{} + +func (evt SectorOldTicket) apply(*SectorInfo) {} + type SectorPreCommit1 struct { PreCommit1Out storage.PreCommit1Out - TicketValue abi.SealRandomness - TicketEpoch abi.ChainEpoch } func (evt SectorPreCommit1) apply(state *SectorInfo) { state.PreCommit1Out = evt.PreCommit1Out - state.TicketEpoch = evt.TicketEpoch - state.TicketValue = evt.TicketValue state.PreCommit2Fails = 0 } @@ -196,6 +206,11 @@ type SectorDealsExpired struct{ error } func (evt SectorDealsExpired) FormatError(xerrors.Printer) (next error) { return evt.error } func (evt SectorDealsExpired) apply(*SectorInfo) {} +type SectorTicketExpired struct{ error } + +func (evt SectorTicketExpired) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorTicketExpired) apply(*SectorInfo) {} + type SectorCommitted struct { Proof []byte } diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go index 51fd2a37b..5b4541f75 100644 --- a/extern/storage-sealing/fsm_test.go +++ b/extern/storage-sealing/fsm_test.go @@ -44,6 +44,9 @@ func TestHappyPath(t *testing.T) { } m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, GetTicket) + + m.planSingle(SectorTicket{}) require.Equal(m.t, m.state.State, PreCommit1) m.planSingle(SectorPreCommit1{}) @@ -73,7 +76,7 @@ func TestHappyPath(t *testing.T) { m.planSingle(SectorFinalized{}) require.Equal(m.t, m.state.State, Proving) - expected := []SectorState{Packing, PreCommit1, PreCommit2, PreCommitting, 
PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving} + expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving} for i, n := range notif { if n.before.State != expected[i] { t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) @@ -98,6 +101,9 @@ func TestSeedRevert(t *testing.T) { } m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, GetTicket) + + m.planSingle(SectorTicket{}) require.Equal(m.t, m.state.State, PreCommit1) m.planSingle(SectorPreCommit1{}) diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 1ba53661a..d9953eee0 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -53,6 +53,7 @@ type SealingAPI interface { StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) + StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index 10b96e504..8b0bff24a 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -41,6 +41,7 @@ const ( Empty SectorState = "Empty" WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector Packing SectorState = "Packing" // sector not in sealStore, and not on chain + GetTicket SectorState = "GetTicket" // generate ticket PreCommit1 SectorState = "PreCommit1" // do PreCommit1 PreCommit2 SectorState = "PreCommit2" // do PreCommit2 PreCommitting SectorState = "PreCommitting" // on chain pre-commit diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index d22830253..b583701ae 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -170,7 +170,7 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo case *ErrExpiredTicket: return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)}) case *ErrBadTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) + return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket: %w", err)}) case *ErrInvalidDeals: log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed}) diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 96589bcd2..a1aee4cde 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -4,10 +4,7 @@ import ( "bytes" "context" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - 
"github.com/filecoin-project/lotus/chain/actors/policy" - + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -15,11 +12,16 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" - "github.com/filecoin-project/specs-actors/actors/builtin" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" ) var DealSectorPriority = 1024 +var MaxTicketAge = abi.ChainEpoch(builtin0.EpochsInDay * 2) func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error { log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber) @@ -82,6 +84,33 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se return abi.SealRandomness(rand), ticketEpoch, nil } +func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) error { + ticketValue, ticketEpoch, err := m.getTicket(ctx, sector) + if err != nil { + allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if aerr == nil { + log.Errorf("error checking if sector is allocated: %+v", err) + } + + if allocated { + if sector.CommitMessage != nil { + // Some recovery paths with unfortunate timing lead here + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector %s is committed but got into the GetTicket state", sector.SectorNumber)}) + } + + log.Errorf("Sector %s precommitted but expired", sector.SectorNumber) + return ctx.Send(SectorRemove{}) + } + + return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("getting ticket failed: %w", err)}) + } + + return ctx.Send(SectorTicket{ + TicketValue: ticketValue, + TicketEpoch: ticketEpoch, + }) +} + func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error { if err := checkPieces(ctx.Context(), m.maddr, sector, m.api); err != nil { // Sanity check state switch err.(type) { @@ -98,21 +127,34 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) } } - log.Infow("performing sector replication...", "sector", sector.SectorNumber) - ticketValue, ticketEpoch, err := m.getTicket(ctx, sector) + tok, height, err := m.api.ChainHead(ctx.Context()) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("getting ticket failed: %w", err)}) + log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) + return nil } - pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), ticketValue, sector.pieceInfos()) + if height-sector.TicketEpoch > MaxTicketAge { + pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("getting precommit info: %+v", err) + } + + if pci == nil { + return ctx.Send(SectorOldTicket{}) // go get new ticket + } + + // TODO: allow configuring expected seal durations, if we're here, it's + // pretty unlikely that we'll precommit on time (unless the miner + // process has just restarted and the worker had the result ready) + } + + pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) if err != nil 
{ return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)}) } return ctx.Send(SectorPreCommit1{ PreCommit1Out: pc1o, - TicketValue: ticketValue, - TicketEpoch: ticketEpoch, }) } @@ -122,6 +164,10 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)}) } + if cids.Unsealed == cid.Undef { + return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(2) returned undefined CommD")}) + } + return ctx.Send(SectorPreCommit2{ Unsealed: cids.Unsealed, Sealed: cids.Sealed, @@ -220,7 +266,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf deposit := big.Max(depositMinimum, collateral) log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit) - mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, builtin.MethodsMiner.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes()) + mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, miner.Methods.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes()) if err != nil { if params.ReplaceCapacity { m.remarkForUpgrade(params.ReplaceSectorNumber) @@ -401,7 +447,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo } // TODO: check seed / ticket / deals are up to date - mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, builtin.MethodsMiner.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes()) + mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, miner.Methods.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes()) if err != nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index 046271a7f..8f3e82a0b 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -9,9 +9,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) @@ -82,7 +82,7 @@ type SectorInfo struct { CommR *cid.Cid Proof []byte - PreCommitInfo *miner0.SectorPreCommitInfo + PreCommitInfo *miner.SectorPreCommitInfo PreCommitDeposit big.Int PreCommitMessage *cid.Cid PreCommitTipSet TipSetToken diff --git a/extern/storage-sealing/types_test.go b/extern/storage-sealing/types_test.go index fc56620dc..0b3c97032 100644 --- a/extern/storage-sealing/types_test.go +++ b/extern/storage-sealing/types_test.go @@ -8,7 +8,7 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) func TestSectorInfoSelialization(t *testing.T) { @@ -22,7 +22,7 @@ func TestSectorInfoSelialization(t *testing.T) { }, } - dummyCid := builtin.AccountActorCodeID + dummyCid := builtin2.AccountActorCodeID si := &SectorInfo{ State: "stateful", diff --git a/extern/test-vectors b/extern/test-vectors index 
a8f968ade..d9a75a787 160000 --- a/extern/test-vectors +++ b/extern/test-vectors @@ -1 +1 @@ -Subproject commit a8f968adeba1995f161f7be0048188affc425079 +Subproject commit d9a75a7873aee0db28b87e3970d2ea16a2f37c6a diff --git a/gen/main.go b/gen/main.go index d5874af2c..c2a6d009b 100644 --- a/gen/main.go +++ b/gen/main.go @@ -9,6 +9,8 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/types" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/paychmgr" ) @@ -75,4 +77,22 @@ func main() { fmt.Println(err) os.Exit(1) } + + err = gen.WriteMapEncodersToFile("./extern/sector-storage/storiface/cbor_gen.go", "storiface", + storiface.CallID{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + err = gen.WriteMapEncodersToFile("./extern/sector-storage/cbor_gen.go", "sectorstorage", + sectorstorage.Call{}, + sectorstorage.WorkState{}, + sectorstorage.WorkID{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } } diff --git a/genesis/types.go b/genesis/types.go index 79656feac..db8d32a3b 100644 --- a/genesis/types.go +++ b/genesis/types.go @@ -5,9 +5,10 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" ) type ActorType string @@ -21,7 +22,7 @@ type PreSeal struct { CommR cid.Cid CommD cid.Cid SectorID abi.SectorNumber - Deal market.DealProposal + Deal market2.DealProposal ProofType abi.RegisteredSealProof } diff --git a/go.mod b/go.mod index eb79a496c..723640930 100644 --- a/go.mod +++ b/go.mod @@ -11,37 +11,38 @@ require ( github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 - github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e + github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 github.com/coreos/go-systemd/v22 v22.0.0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.2 github.com/docker/go-units v0.4.0 - github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32 - github.com/drand/kyber v1.1.2 + github.com/drand/drand v1.2.1 + github.com/drand/kyber v1.1.4 github.com/dustin/go-humanize v1.0.0 github.com/elastic/go-sysinfo v1.3.0 - github.com/fatih/color v1.8.0 - github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d + github.com/fatih/color v1.9.0 + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f github.com/filecoin-project/go-address v0.0.4 github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect github.com/filecoin-project/go-bitfield v0.2.1 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v0.6.7 + github.com/filecoin-project/go-data-transfer v0.9.0 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - 
github.com/filecoin-project/go-fil-markets v0.7.1 + github.com/filecoin-project/go-fil-markets v1.0.0 github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 - github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab + github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.0 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/specs-actors v0.9.12 - github.com/filecoin-project/specs-actors/v2 v2.0.3 + github.com/filecoin-project/specs-actors/v2 v2.2.0 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 - github.com/filecoin-project/test-vectors/schema v0.0.4 + github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect @@ -66,7 +67,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.2.1 + github.com/ipfs/go-graphsync v0.3.1 github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 @@ -80,6 +81,7 @@ require ( github.com/ipfs/go-log v1.0.4 github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.2.4 github.com/ipfs/interface-go-ipfs-core v0.2.3 @@ -104,6 +106,7 @@ require ( github.com/libp2p/go-libp2p-tls v0.1.3 github.com/libp2p/go-libp2p-yamux v0.2.8 github.com/libp2p/go-maddr-filter v0.1.0 + github.com/mattn/go-colorable v0.1.6 // indirect github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.3 @@ -113,6 +116,7 @@ require ( github.com/multiformats/go-multihash v0.0.14 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a + github.com/prometheus/client_golang v1.6.0 github.com/raulk/clock v1.1.0 github.com/stretchr/testify v1.6.1 github.com/supranational/blst v0.1.1 @@ -120,6 +124,7 @@ require ( github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 + github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 @@ -129,7 +134,7 @@ require ( go.uber.org/multierr v1.5.0 go.uber.org/zap v1.15.0 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 + golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 diff --git a/go.sum b/go.sum index e13359f6c..3ced99135 100644 --- a/go.sum +++ b/go.sum @@ -91,6 +91,7 @@ 
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0= github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -120,8 +121,11 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -133,6 +137,8 @@ github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOi github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b h1:OKALTB609+19AM7wsO0k8yMwAqjEIppcnYvyIhA+ZlQ= github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY= +github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= @@ -191,14 +197,15 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drand/bls12-381 v0.3.2 h1:RImU8Wckmx8XQx1tp1q04OV73J9Tj6mmpQLYDP7V1XE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32 h1:sU+51aQRaDxg0KnjQg19KuYRIxDBEUHffBAICSnBys8= -github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32/go.mod 
h1:0sQEVg+ngs1jaDPVIiEgY0lbENWJPaUlWxGHEaSmKVM= +github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= +github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.2 h1:faemqlaFyLrbBSjZGRzzu5SG/do+uTYpHlnrJIHbAhQ= -github.com/drand/kyber v1.1.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber-bls12381 v0.1.0 h1:/P4C65VnyEwxzR5ZYYVMNzY1If+aYBrdUU5ukwh7LQw= -github.com/drand/kyber-bls12381 v0.1.0/go.mod h1:N1emiHpm+jj7kMlxEbu3MUyOiooTgNySln564cgD9mk= +github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= +github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= +github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= +github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -221,6 +228,8 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= @@ -238,14 +247,14 @@ github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:a github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.6.7 h1:Kacr5qz2YWtd3sensU6aXFtES7joeapVDeXApeUD35I= -github.com/filecoin-project/go-data-transfer v0.6.7/go.mod h1:C++k1U6+jMQODOaen5OPDo9XQbth9Yq3ie94vNjBJbk= +github.com/filecoin-project/go-data-transfer v0.9.0 h1:nTT8j7Hu3TM0wRWrGy83/ctawG7sleJGdFWtIsUsKgY= +github.com/filecoin-project/go-data-transfer v0.9.0/go.mod h1:i2CqUy7TMQGKukj9BgqIxiP8nDHDXU2VLd771KVaCaQ= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= 
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-markets v0.7.1 h1:e0NlpSnaeGyDUhCOzevjcxkSA54kt9BzlXpLRgduUFI= -github.com/filecoin-project/go-fil-markets v0.7.1/go.mod h1:5Pt4DXQqUoUrp9QzlSdlYTpItXxwAtqKrxRWQ6hAOqk= +github.com/filecoin-project/go-fil-markets v1.0.0 h1:np9+tlnWXh9xYG4oZfha6HZFLYOaAZoMGR3V4w6DM48= +github.com/filecoin-project/go-fil-markets v1.0.0/go.mod h1:lXExJyYHwpMMddCqhEdNrc7euYJKNkp04K76NZqJLGg= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= @@ -262,10 +271,10 @@ github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200905071437-95828685f9df/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-statemachine v0.0.0-20200714194326-a77c3ae20989/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= @@ -273,15 +282,15 @@ github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZO github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.7/go.mod h1:wM2z+kwqYgXn5Z7scV1YHLyd1Q1cy0R8HfTIWQ0BFGU= github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.3 h1:Niy6xncgi8bI8aBCt1McdZfATBfG4Uxytt8KW4s3bAc= -github.com/filecoin-project/specs-actors/v2 v2.0.3/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= +github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod 
h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= +github.com/filecoin-project/specs-actors/v2 v2.2.0 h1:IyCICb0NHYeD0sdSqjVGwWydn/7r7xXuxdpvGAcRCGY= +github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc= github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk= github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= -github.com/filecoin-project/test-vectors/schema v0.0.4 h1:QTRd0gb/NP4ZOTM7Dib5U3xE1/ToGDKnYLfxkC3t/m8= -github.com/filecoin-project/test-vectors/schema v0.0.4/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= +github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= +github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= @@ -374,6 +383,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -536,8 +547,10 @@ github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPi github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.2.1 h1:MdehhqBSuTI2LARfKLkpYnt0mUrqHs/mtuDnESXHBfU= -github.com/ipfs/go-graphsync v0.2.1/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= +github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.3.1 h1:dJLYrck4oyJDfMVhGEKiWHxaY8oYMWko4m2Fi+4bofo= +github.com/ipfs/go-graphsync v0.3.1/go.mod h1:bw4LiLM5Oq/uLdzEtih9LK8GrwSijv+XqYiWCTxHMqs= github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= @@ -621,6 +634,8 @@ github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1 github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= 
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= +github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= @@ -703,6 +718,9 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f h1:qET3Wx0v8tMtoTOQnsJXVvqvCopSf48qobR6tcJuDHo= github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -1045,11 +1063,16 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1422,6 +1445,8 @@ github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDb github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod 
h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 h1:NwiwjQDB3CzQ5XH0rdMh1oQqzJH7O2PSLWxif/w3zsY= +github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4/go.mod h1:K+EVq8d5QcQ2At5VECsA+SNZvWefyBXh8TnIsxo1OvQ= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= @@ -1434,8 +1459,8 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1: github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829 h1:wb7xrDzfkLgPHsSEBm+VSx6aDdi64VtV0xvP0E6j8bk= -github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= @@ -1443,6 +1468,10 @@ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZD github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= +github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= +github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo= go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= @@ -1515,6 +1544,7 @@ golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1523,6 +1553,9 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1658,11 +1691,13 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1675,6 +1710,9 @@ golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc= +golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1802,6 +1840,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/lib/blockstore/blockstore.go b/lib/blockstore/blockstore.go index 9e74f4373..99d849188 100644 --- a/lib/blockstore/blockstore.go +++ b/lib/blockstore/blockstore.go @@ -18,19 +18,18 @@ import ( "context" ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" ) // NewTemporary returns a temporary blockstore. -func NewTemporary() blockstore.Blockstore { - return NewBlockstore(ds.NewMapDatastore()) +func NewTemporary() MemStore { + return make(MemStore) } // NewTemporarySync returns a thread-safe temporary blockstore. -func NewTemporarySync() blockstore.Blockstore { - return NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) +func NewTemporarySync() *SyncStore { + return &SyncStore{bs: make(MemStore)} } // WrapIDStore wraps the underlying blockstore in an "identity" blockstore. 
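For orientation, a minimal usage sketch of the new map-backed temporary stores (illustrative only, not taken from this patch; the example package, function name and block payload are assumptions):

	package example

	import (
		blocks "github.com/ipfs/go-block-format"

		bstore "github.com/filecoin-project/lotus/lib/blockstore"
	)

	func scratchStores() error {
		buf := bstore.NewTemporary()         // MemStore: a plain map, for single-goroutine use
		syncBuf := bstore.NewTemporarySync() // *SyncStore: the same map guarded by a RWMutex

		blk := blocks.NewBlock([]byte("hello"))
		if err := buf.Put(blk); err != nil { // MemStore.Put never returns an error
			return err
		}
		if err := syncBuf.Put(blk); err != nil {
			return err
		}
		_, err := syncBuf.Get(blk.Cid()) // bstore.ErrNotFound when the CID is absent
		return err
	}

Both constructors still return drop-in Blockstore implementations, which is why callers such as the bufbstore buffer further down keep using NewTemporary unchanged.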
@@ -51,9 +50,16 @@ type GCLocker = blockstore.GCLocker var NewGCLocker = blockstore.NewGCLocker var NewGCBlockstore = blockstore.NewGCBlockstore -var DefaultCacheOpts = blockstore.DefaultCacheOpts var ErrNotFound = blockstore.ErrNotFound +func DefaultCacheOpts() CacheOpts { + return CacheOpts{ + HasBloomFilterSize: 0, + HasBloomFilterHashes: 0, + HasARCCacheSize: 512 << 10, + } +} + func CachedBlockstore(ctx context.Context, bs Blockstore, opts CacheOpts) (Blockstore, error) { bs, err := blockstore.CachedBlockstore(ctx, bs, opts) if err != nil { diff --git a/lib/blockstore/memstore.go b/lib/blockstore/memstore.go new file mode 100644 index 000000000..9745d6f03 --- /dev/null +++ b/lib/blockstore/memstore.go @@ -0,0 +1,80 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +type MemStore map[cid.Cid]blocks.Block + +func (m MemStore) DeleteBlock(k cid.Cid) error { + delete(m, k) + return nil +} +func (m MemStore) Has(k cid.Cid) (bool, error) { + _, ok := m[k] + return ok, nil +} +func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { + b, ok := m[k] + if !ok { + return nil, blockstore.ErrNotFound + } + return b, nil +} + +// GetSize returns the CIDs mapped BlockSize +func (m MemStore) GetSize(k cid.Cid) (int, error) { + b, ok := m[k] + if !ok { + return 0, blockstore.ErrNotFound + } + return len(b.RawData()), nil +} + +// Put puts a given block to the underlying datastore +func (m MemStore) Put(b blocks.Block) error { + // Convert to a basic block for safety, but try to reuse the existing + // block if it's already a basic block. + k := b.Cid() + if _, ok := b.(*blocks.BasicBlock); !ok { + // If we already have the block, abort. + if _, ok := m[k]; ok { + return nil + } + // the error is only for debugging. + b, _ = blocks.NewBlockWithCid(b.RawData(), b.Cid()) + } + m[b.Cid()] = b + return nil +} + +// PutMany puts a slice of blocks at the same time using batching +// capabilities of the underlying datastore whenever possible. +func (m MemStore) PutMany(bs []blocks.Block) error { + for _, b := range bs { + _ = m.Put(b) // can't fail + } + return nil +} + +// AllKeysChan returns a channel from which +// the CIDs in the Blockstore can be read. It should respect +// the given context, closing the channel if it becomes Done. +func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ch := make(chan cid.Cid, len(m)) + for k := range m { + ch <- k + } + close(ch) + return ch, nil +} + +// HashOnRead specifies if every read block should be +// rehashed to make sure it matches its CID. +func (m MemStore) HashOnRead(enabled bool) { + // no-op +} diff --git a/lib/blockstore/syncstore.go b/lib/blockstore/syncstore.go new file mode 100644 index 000000000..be9f6b5c4 --- /dev/null +++ b/lib/blockstore/syncstore.go @@ -0,0 +1,68 @@ +package blockstore + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type SyncStore struct { + mu sync.RWMutex + bs MemStore // specifically use a memStore to save indirection overhead. 
+} + +func (m *SyncStore) DeleteBlock(k cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteBlock(k) +} +func (m *SyncStore) Has(k cid.Cid) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Has(k) +} +func (m *SyncStore) Get(k cid.Cid) (blocks.Block, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Get(k) +} + +// GetSize returns the CIDs mapped BlockSize +func (m *SyncStore) GetSize(k cid.Cid) (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.GetSize(k) +} + +// Put puts a given block to the underlying datastore +func (m *SyncStore) Put(b blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.Put(b) +} + +// PutMany puts a slice of blocks at the same time using batching +// capabilities of the underlying datastore whenever possible. +func (m *SyncStore) PutMany(bs []blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.PutMany(bs) +} + +// AllKeysChan returns a channel from which +// the CIDs in the Blockstore can be read. It should respect +// the given context, closing the channel if it becomes Done. +func (m *SyncStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + m.mu.RLock() + defer m.mu.RUnlock() + // this blockstore implementation doesn't do any async work. + return m.bs.AllKeysChan(ctx) +} + +// HashOnRead specifies if every read block should be +// rehashed to make sure it matches its CID. +func (m *SyncStore) HashOnRead(enabled bool) { + // noop +} diff --git a/lib/bufbstore/buf_bstore.go b/lib/bufbstore/buf_bstore.go index a766c2b52..4ea746444 100644 --- a/lib/bufbstore/buf_bstore.go +++ b/lib/bufbstore/buf_bstore.go @@ -19,10 +19,12 @@ type BufferedBS struct { } func NewBufferedBstore(base bstore.Blockstore) *BufferedBS { - buf := bstore.NewTemporary() + var buf bstore.Blockstore if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { log.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") buf = base + } else { + buf = bstore.NewTemporary() } return &BufferedBS{ diff --git a/lib/commp/writer.go b/lib/commp/writer.go new file mode 100644 index 000000000..4c5e3350c --- /dev/null +++ b/lib/commp/writer.go @@ -0,0 +1,113 @@ +package commp + +import ( + "bytes" + "math/bits" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" +) + +const commPBufPad = abi.PaddedPieceSize(8 << 20) +const CommPBuf = abi.UnpaddedPieceSize(commPBufPad - (commPBufPad / 128)) // can't use .Unpadded() for const + +type Writer struct { + len int64 + buf [CommPBuf]byte + leaves []cid.Cid +} + +func (w *Writer) Write(p []byte) (int, error) { + n := len(p) + for len(p) > 0 { + buffered := int(w.len % int64(len(w.buf))) + toBuffer := len(w.buf) - buffered + if toBuffer > len(p) { + toBuffer = len(p) + } + + copied := copy(w.buf[buffered:], p[:toBuffer]) + p = p[copied:] + w.len += int64(copied) + + if copied > 0 && w.len%int64(len(w.buf)) == 0 { + leaf, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, bytes.NewReader(w.buf[:]), CommPBuf) + if err != nil { + return 0, err + } + w.leaves = append(w.leaves, leaf) + } + } + return n, nil +} + +func (w *Writer) Sum() (api.DataCIDSize, error) { + // process last non-zero leaf if exists + 
lastLen := w.len % int64(len(w.buf)) + rawLen := w.len + + // process remaining bit of data + if lastLen != 0 { + if len(w.leaves) != 0 { + copy(w.buf[lastLen:], make([]byte, int(int64(CommPBuf)-lastLen))) + lastLen = int64(CommPBuf) + } + + r, sz := padreader.New(bytes.NewReader(w.buf[:lastLen]), uint64(lastLen)) + p, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, r, sz) + if err != nil { + return api.DataCIDSize{}, err + } + + if sz < CommPBuf { // special case for pieces smaller than 16MiB + return api.DataCIDSize{ + PayloadSize: w.len, + PieceSize: sz.Padded(), + PieceCID: p, + }, nil + } + + w.leaves = append(w.leaves, p) + } + + // pad with zero pieces to power-of-two size + fillerLeaves := (1 << (bits.Len(uint(len(w.leaves) - 1)))) - len(w.leaves) + for i := 0; i < fillerLeaves; i++ { + w.leaves = append(w.leaves, zerocomm.ZeroPieceCommitment(CommPBuf)) + } + + if len(w.leaves) == 1 { + return api.DataCIDSize{ + PayloadSize: rawLen, + PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad, + PieceCID: w.leaves[0], + }, nil + } + + pieces := make([]abi.PieceInfo, len(w.leaves)) + for i, leaf := range w.leaves { + pieces[i] = abi.PieceInfo{ + Size: commPBufPad, + PieceCID: leaf, + } + } + + p, err := ffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg32GiBV1, pieces) + if err != nil { + return api.DataCIDSize{}, xerrors.Errorf("generating unsealed CID: %w", err) + } + + return api.DataCIDSize{ + PayloadSize: rawLen, + PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad, + PieceCID: p, + }, nil +} diff --git a/lib/commp/writer_test.go b/lib/commp/writer_test.go new file mode 100644 index 000000000..284648e4e --- /dev/null +++ b/lib/commp/writer_test.go @@ -0,0 +1,88 @@ +package commp + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" +) + +func TestWriterZero(t *testing.T) { + for i, s := range []struct { + writes []int + expect abi.PaddedPieceSize + }{ + {writes: []int{200}, expect: 256}, + {writes: []int{200, 200}, expect: 512}, + + {writes: []int{int(CommPBuf)}, expect: commPBufPad}, + {writes: []int{int(CommPBuf) * 2}, expect: 2 * commPBufPad}, + {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 4 * commPBufPad}, + {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 16 * commPBufPad}, + + {writes: []int{200, int(CommPBuf)}, expect: 2 * commPBufPad}, + } { + s := s + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + w := &Writer{} + var rawSum int64 + for _, write := range s.writes { + rawSum += int64(write) + _, err := w.Write(make([]byte, write)) + require.NoError(t, err) + } + + p, err := w.Sum() + require.NoError(t, err) + require.Equal(t, rawSum, p.PayloadSize) + require.Equal(t, s.expect, p.PieceSize) + require.Equal(t, zerocomm.ZeroPieceCommitment(s.expect.Unpadded()).String(), p.PieceCID.String()) + }) + } +} + +func TestWriterData(t *testing.T) { + dataLen := float64(CommPBuf) * 6.78 + data, _ := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(dataLen))) + + pr, sz := padreader.New(bytes.NewReader(data), uint64(dataLen)) + exp, err := 
ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, pr, sz) + require.NoError(t, err) + + w := &Writer{} + _, err = io.Copy(w, bytes.NewReader(data)) + require.NoError(t, err) + + res, err := w.Sum() + require.NoError(t, err) + + require.Equal(t, exp.String(), res.PieceCID.String()) +} + +func BenchmarkWriterZero(b *testing.B) { + buf := make([]byte, int(CommPBuf)*b.N) + b.SetBytes(int64(CommPBuf)) + b.ResetTimer() + + w := &Writer{} + + _, err := w.Write(buf) + require.NoError(b, err) + o, err := w.Sum() + + b.StopTimer() + + require.NoError(b, err) + require.Equal(b, zerocomm.ZeroPieceCommitment(o.PieceSize.Unpadded()).String(), o.PieceCID.String()) + require.Equal(b, int64(CommPBuf)*int64(b.N), o.PayloadSize) +} diff --git a/lib/ipfsbstore/ipfsbstore.go b/lib/ipfsbstore/ipfsbstore.go index 748afee51..e66f3da89 100644 --- a/lib/ipfsbstore/ipfsbstore.go +++ b/lib/ipfsbstore/ipfsbstore.go @@ -25,12 +25,12 @@ type IpfsBstore struct { api iface.CoreAPI } -func NewIpfsBstore(ctx context.Context) (*IpfsBstore, error) { +func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) { localApi, err := httpapi.NewLocalApi() if err != nil { return nil, xerrors.Errorf("getting local ipfs api: %w", err) } - api, err := localApi.WithOptions(options.Api.Offline(true)) + api, err := localApi.WithOptions(options.Api.Offline(!onlineMode)) if err != nil { return nil, xerrors.Errorf("setting offline mode: %s", err) } @@ -41,12 +41,12 @@ func NewIpfsBstore(ctx context.Context) (*IpfsBstore, error) { }, nil } -func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr) (*IpfsBstore, error) { +func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (*IpfsBstore, error) { httpApi, err := httpapi.NewApi(maddr) if err != nil { return nil, xerrors.Errorf("setting remote ipfs api: %w", err) } - api, err := httpApi.WithOptions(options.Api.Offline(true)) + api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode)) if err != nil { return nil, xerrors.Errorf("applying offline mode: %s", err) } diff --git a/lib/timedbs/timedbs.go b/lib/timedbs/timedbs.go new file mode 100644 index 000000000..c5c1a8fe0 --- /dev/null +++ b/lib/timedbs/timedbs.go @@ -0,0 +1,162 @@ +package timedbs + +import ( + "context" + "fmt" + "sync" + "time" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/raulk/clock" + "go.uber.org/multierr" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/lib/blockstore" +) + +// TimedCacheBS is a blockstore that keeps blocks for at least the specified +// caching interval before discarding them. Garbage collection must be started +// and stopped by calling Start/Stop. +// +// Under the covers, it's implemented with an active and an inactive blockstore +// that are rotated every cache time interval. This means all blocks will be +// stored at most 2x the cache interval. 
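An illustrative lifecycle sketch of the rotation semantics just described (not drawn from the patch itself; ctx and blk stand in for a caller's context and block):

	tc := timedbs.NewTimedCacheBS(30 * time.Second)
	if err := tc.Start(ctx); err != nil { // begins the rotation ticker
		return err
	}
	defer func() { _ = tc.Stop(ctx) }()

	_ = tc.Put(blk) // written into the active store
	// After one rotation the block moves to the inactive store and stays
	// readable; after a second rotation it is discarded.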
+type TimedCacheBS struct { + mu sync.RWMutex + active, inactive blockstore.MemStore + clock clock.Clock + interval time.Duration + closeCh chan struct{} + doneRotatingCh chan struct{} +} + +func NewTimedCacheBS(cacheTime time.Duration) *TimedCacheBS { + return &TimedCacheBS{ + active: blockstore.NewTemporary(), + inactive: blockstore.NewTemporary(), + interval: cacheTime, + clock: build.Clock, + } +} + +func (t *TimedCacheBS) Start(ctx context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.closeCh != nil { + return fmt.Errorf("already started") + } + t.closeCh = make(chan struct{}) + go func() { + ticker := t.clock.Ticker(t.interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + t.rotate() + if t.doneRotatingCh != nil { + t.doneRotatingCh <- struct{}{} + } + case <-t.closeCh: + return + } + } + }() + return nil +} + +func (t *TimedCacheBS) Stop(ctx context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.closeCh == nil { + return fmt.Errorf("not started started") + } + select { + case <-t.closeCh: + // already closed + default: + close(t.closeCh) + } + return nil +} + +func (t *TimedCacheBS) rotate() { + newBs := blockstore.NewTemporary() + + t.mu.Lock() + t.inactive, t.active = t.active, newBs + t.mu.Unlock() +} + +func (t *TimedCacheBS) Put(b blocks.Block) error { + // Don't check the inactive set here. We want to keep this block for at + // least one interval. + t.mu.Lock() + defer t.mu.Unlock() + return t.active.Put(b) +} + +func (t *TimedCacheBS) PutMany(bs []blocks.Block) error { + t.mu.Lock() + defer t.mu.Unlock() + return t.active.PutMany(bs) +} + +func (t *TimedCacheBS) Get(k cid.Cid) (blocks.Block, error) { + t.mu.RLock() + defer t.mu.RUnlock() + b, err := t.active.Get(k) + if err == blockstore.ErrNotFound { + b, err = t.inactive.Get(k) + } + return b, err +} + +func (t *TimedCacheBS) GetSize(k cid.Cid) (int, error) { + t.mu.RLock() + defer t.mu.RUnlock() + size, err := t.active.GetSize(k) + if err == blockstore.ErrNotFound { + size, err = t.inactive.GetSize(k) + } + return size, err +} + +func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) { + t.mu.RLock() + defer t.mu.RUnlock() + if has, err := t.active.Has(k); err != nil { + return false, err + } else if has { + return true, nil + } + return t.inactive.Has(k) +} + +func (t *TimedCacheBS) HashOnRead(_ bool) { + // no-op +} + +func (t *TimedCacheBS) DeleteBlock(k cid.Cid) error { + t.mu.Lock() + defer t.mu.Unlock() + return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) +} + +func (t *TimedCacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + t.mu.RLock() + defer t.mu.RUnlock() + + ch := make(chan cid.Cid, len(t.active)+len(t.inactive)) + for c := range t.active { + ch <- c + } + for c := range t.inactive { + if _, ok := t.active[c]; ok { + continue + } + ch <- c + } + close(ch) + return ch, nil +} diff --git a/lib/timedbs/timedbs_test.go b/lib/timedbs/timedbs_test.go new file mode 100644 index 000000000..e01215bbd --- /dev/null +++ b/lib/timedbs/timedbs_test.go @@ -0,0 +1,85 @@ +package timedbs + +import ( + "context" + "testing" + "time" + + "github.com/raulk/clock" + "github.com/stretchr/testify/require" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +func TestTimedBSSimple(t *testing.T) { + tc := NewTimedCacheBS(10 * time.Millisecond) + mClock := clock.NewMock() + mClock.Set(time.Now()) + tc.clock = mClock + tc.doneRotatingCh = make(chan struct{}) + + _ = tc.Start(context.Background()) + mClock.Add(1) // IDK why it is 
needed but it makes it work + + defer func() { + _ = tc.Stop(context.Background()) + }() + + b1 := blocks.NewBlock([]byte("foo")) + require.NoError(t, tc.Put(b1)) + + b2 := blocks.NewBlock([]byte("bar")) + require.NoError(t, tc.Put(b2)) + + b3 := blocks.NewBlock([]byte("baz")) + + b1out, err := tc.Get(b1.Cid()) + require.NoError(t, err) + require.Equal(t, b1.RawData(), b1out.RawData()) + + has, err := tc.Has(b1.Cid()) + require.NoError(t, err) + require.True(t, has) + + mClock.Add(10 * time.Millisecond) + <-tc.doneRotatingCh + + // We should still have everything. + has, err = tc.Has(b1.Cid()) + require.NoError(t, err) + require.True(t, has) + + has, err = tc.Has(b2.Cid()) + require.NoError(t, err) + require.True(t, has) + + // extend b2, add b3. + require.NoError(t, tc.Put(b2)) + require.NoError(t, tc.Put(b3)) + + // all keys once. + allKeys, err := tc.AllKeysChan(context.Background()) + var ks []cid.Cid + for k := range allKeys { + ks = append(ks, k) + } + require.NoError(t, err) + require.ElementsMatch(t, ks, []cid.Cid{b1.Cid(), b2.Cid(), b3.Cid()}) + + mClock.Add(10 * time.Millisecond) + <-tc.doneRotatingCh + // should still have b2, and b3, but not b1 + + has, err = tc.Has(b1.Cid()) + require.NoError(t, err) + require.False(t, has) + + has, err = tc.Has(b2.Cid()) + require.NoError(t, err) + require.True(t, has) + + has, err = tc.Has(b3.Cid()) + require.NoError(t, err) + require.True(t, has) +} diff --git a/markets/dealfilter/cli.go b/markets/dealfilter/cli.go index 2cb9d6c4f..af832bfa0 100644 --- a/markets/dealfilter/cli.go +++ b/markets/dealfilter/cli.go @@ -6,35 +6,57 @@ import ( "encoding/json" "os/exec" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func CliDealFilter(cmd string) dtypes.DealFilter { - // TODO: run some checks on the cmd string - +func CliStorageDealFilter(cmd string) dtypes.StorageDealFilter { return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - j, err := json.MarshalIndent(deal, "", " ") - if err != nil { - return false, "", err + d := struct { + storagemarket.MinerDeal + DealType string + }{ + MinerDeal: deal, + DealType: "storage", } - - var out bytes.Buffer - - c := exec.Command("sh", "-c", cmd) - c.Stdin = bytes.NewReader(j) - c.Stdout = &out - c.Stderr = &out - - switch err := c.Run().(type) { - case nil: - return true, "", nil - case *exec.ExitError: - return false, out.String(), nil - default: - return false, "filter cmd run error", err - } - + return runDealFilter(ctx, cmd, d) + } +} + +func CliRetrievalDealFilter(cmd string) dtypes.RetrievalDealFilter { + return func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) { + d := struct { + retrievalmarket.ProviderDealState + DealType string + }{ + ProviderDealState: deal, + DealType: "retrieval", + } + return runDealFilter(ctx, cmd, d) + } +} + +func runDealFilter(ctx context.Context, cmd string, deal interface{}) (bool, string, error) { + j, err := json.MarshalIndent(deal, "", " ") + if err != nil { + return false, "", err + } + + var out bytes.Buffer + + c := exec.Command("sh", "-c", cmd) + c.Stdin = bytes.NewReader(j) + c.Stdout = &out + c.Stderr = &out + + switch err := c.Run().(type) { + case nil: + return true, "", nil + case *exec.ExitError: + return false, out.String(), nil + default: + return false, "filter cmd run error", err } } diff --git a/markets/storageadapter/client.go 
b/markets/storageadapter/client.go index 6496fffad..482183bf9 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -6,20 +6,20 @@ import ( "bytes" "context" - "golang.org/x/xerrors" - - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -27,8 +27,6 @@ import ( "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/markets/utils" @@ -40,10 +38,9 @@ type ClientNodeAdapter struct { full.ChainAPI full.MpoolAPI - sm *stmgr.StateManager - cs *store.ChainStore - fm *market.FundMgr - ev *events.Events + fm *market.FundMgr + ev *events.Events + dsMatcher *dealStateMatcher } type clientApi struct { @@ -51,16 +48,16 @@ type clientApi struct { full.StateAPI } -func NewClientNodeAdapter(state full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, sm *stmgr.StateManager, cs *store.ChainStore, fm *market.FundMgr) storagemarket.StorageClientNode { +func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fm *market.FundMgr) storagemarket.StorageClientNode { + capi := &clientApi{chain, stateapi} return &ClientNodeAdapter{ - StateAPI: state, + StateAPI: stateapi, ChainAPI: chain, MpoolAPI: mpool, - sm: sm, - cs: cs, - fm: fm, - ev: events.NewEvents(context.TODO(), &clientApi{chain, state}), + fm: fm, + ev: events.NewEvents(context.TODO(), capi), + dsMatcher: newDealStateMatcher(state.NewStatePredicates(capi)), } } @@ -103,10 +100,10 @@ func (c *ClientNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Sign func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { // (Provider Node API) smsg, err := c.MpoolPushMessage(ctx, &types.Message{ - To: miner0.StorageMarketActorAddr, + To: miner2.StorageMarketActorAddr, From: addr, Value: amount, - Method: miner0.MethodsMarket.AddBalance, + Method: miner2.MethodsMarket.AddBalance, }, nil) if err != nil { return cid.Undef, err @@ -138,12 +135,12 @@ func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { log.Infow("DEAL ACCEPTED!") - pubmsg, err := c.cs.GetMessage(*deal.PublishMessage) + pubmsg, err := c.ChainGetMessage(ctx, *deal.PublishMessage) if err != nil { return 0, xerrors.Errorf("getting deal publish message: %w", err) } - mi, err := 
stmgr.StateMinerInfo(ctx, c.sm, c.cs.GetHeaviestTipSet(), deal.Proposal.Provider) + mi, err := c.StateMinerInfo(ctx, deal.Proposal.Provider, types.EmptyTSK) if err != nil { return 0, xerrors.Errorf("getting miner worker failed: %w", err) } @@ -157,15 +154,15 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider) } - if pubmsg.To != miner0.StorageMarketActorAddr { + if pubmsg.To != miner2.StorageMarketActorAddr { return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To) } - if pubmsg.Method != miner0.MethodsMarket.PublishStorageDeals { + if pubmsg.Method != miner2.MethodsMarket.PublishStorageDeals { return 0, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method) } - var params market0.PublishStorageDealsParams + var params market2.PublishStorageDealsParams if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { return 0, err } @@ -189,16 +186,16 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor } // TODO: timeout - _, ret, _, err := c.sm.WaitForMessage(ctx, *deal.PublishMessage, build.MessageConfidence, stmgr.LookbackNoLimit) + ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence) if err != nil { return 0, xerrors.Errorf("waiting for deal publish message: %w", err) } - if ret.ExitCode != 0 { - return 0, xerrors.Errorf("deal publish failed: exit=%d", ret.ExitCode) + if ret.Receipt.ExitCode != 0 { + return 0, xerrors.Errorf("deal publish failed: exit=%d", ret.Receipt.ExitCode) } - var res market0.PublishStorageDealsReturn - if err := res.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil { + var res market2.PublishStorageDealsReturn + if err := res.UnmarshalCBOR(bytes.NewReader(ret.Receipt.Return)); err != nil { return 0, err } @@ -218,7 +215,7 @@ func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, si func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealId abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := stmgr.GetStorageDeal(ctx, c.StateManager, dealId, ts) + sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) if err != nil { // TODO: This may be fine for some errors @@ -245,7 +242,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider return false, nil } - sd, err := stmgr.GetStorageDeal(ctx, c.StateManager, dealId, ts) + sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) if err != nil { return false, xerrors.Errorf("failed to look up deal on chain: %w", err) } @@ -269,44 +266,44 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider var sectorNumber abi.SectorNumber var sectorFound bool - matchEvent := func(msg *types.Message) (matchOnce bool, matched bool, err error) { + matchEvent := func(msg *types.Message) (matched bool, err error) { if msg.To != provider { - return true, false, nil + return false, nil } switch msg.Method { - case miner0.MethodsMiner.PreCommitSector: + case miner2.MethodsMiner.PreCommitSector: var params miner.SectorPreCommitInfo if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("unmarshal pre commit: %w", err) + return false, 
xerrors.Errorf("unmarshal pre commit: %w", err) } for _, did := range params.DealIDs { if did == dealId { sectorNumber = params.SectorNumber sectorFound = true - return true, false, nil + return false, nil } } - return true, false, nil - case miner0.MethodsMiner.ProveCommitSector: + return false, nil + case miner2.MethodsMiner.ProveCommitSector: var params miner.ProveCommitSectorParams if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) } if !sectorFound { - return true, false, nil + return false, nil } if params.SectorNumber != sectorNumber { - return true, false, nil + return false, nil } - return false, true, nil + return true, nil default: - return true, false, nil + return false, nil } } @@ -395,13 +392,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a } // Watch for state changes to the deal - preds := state.NewStatePredicates(c) - dealDiff := preds.OnStorageMarketActorChanged( - preds.OnDealStateChanged( - preds.DealStateChangedForIDs([]abi.DealID{dealID}))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiff(ctx, oldTs.Key(), newTs.Key()) - } + match := c.dsMatcher.matcher(ctx, dealID) // Wait until after the end epoch for the deal and then timeout timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 @@ -412,7 +403,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a return nil } -func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal market0.DealProposal) (*market0.ClientDealProposal, error) { +func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal market2.DealProposal) (*market2.ClientDealProposal, error) { // TODO: output spec signed proposal buf, err := cborutil.Dump(&proposal) if err != nil { @@ -431,7 +422,7 @@ func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Add return nil, err } - return &market0.ClientDealProposal{ + return &market2.ClientDealProposal{ Proposal: proposal, ClientSignature: *sig, }, nil diff --git a/markets/storageadapter/dealstatematcher.go b/markets/storageadapter/dealstatematcher.go new file mode 100644 index 000000000..b8b47ef8e --- /dev/null +++ b/markets/storageadapter/dealstatematcher.go @@ -0,0 +1,84 @@ +package storageadapter + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + actorsmarket "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/state" + "github.com/filecoin-project/lotus/chain/types" +) + +// dealStateMatcher caches the DealStates for the most recent +// old/new tipset combination +type dealStateMatcher struct { + preds *state.StatePredicates + + lk sync.Mutex + oldTsk types.TipSetKey + newTsk types.TipSetKey + oldDealStateRoot actorsmarket.DealStates + newDealStateRoot actorsmarket.DealStates +} + +func newDealStateMatcher(preds *state.StatePredicates) *dealStateMatcher { + return &dealStateMatcher{preds: preds} +} + +// matcher returns a function that checks if the state of the given dealID +// has changed. +// It caches the DealStates for the most recent old/new tipset combination. 
+func (mc *dealStateMatcher) matcher(ctx context.Context, dealID abi.DealID) events.StateMatchFunc { + // The function that is called to check if the deal state has changed for + // the target deal ID + dealStateChangedForID := mc.preds.DealStateChangedForIDs([]abi.DealID{dealID}) + + // The match function is called by the events API to check if there's + // been a state change for the deal with the target deal ID + match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { + mc.lk.Lock() + defer mc.lk.Unlock() + + // Check if we've already fetched the DealStates for the given tipsets + if mc.oldTsk == oldTs.Key() && mc.newTsk == newTs.Key() { + // If we fetch the DealStates and there is no difference between + // them, they are stored as nil. So we can just bail out. + if mc.oldDealStateRoot == nil || mc.newDealStateRoot == nil { + return false, nil, nil + } + + // Check if the deal state has changed for the target ID + return dealStateChangedForID(ctx, mc.oldDealStateRoot, mc.newDealStateRoot) + } + + // We haven't already fetched the DealStates for the given tipsets, so + // do so now + + // Replace dealStateChangedForID with a function that records the + // DealStates so that we can cache them + var oldDealStateRootSaved, newDealStateRootSaved actorsmarket.DealStates + recorder := func(ctx context.Context, oldDealStateRoot, newDealStateRoot actorsmarket.DealStates) (changed bool, user state.UserData, err error) { + // Record DealStates + oldDealStateRootSaved = oldDealStateRoot + newDealStateRootSaved = newDealStateRoot + + return dealStateChangedForID(ctx, oldDealStateRoot, newDealStateRoot) + } + + // Call the match function + dealDiff := mc.preds.OnStorageMarketActorChanged( + mc.preds.OnDealStateChanged(recorder)) + matched, data, err := dealDiff(ctx, oldTs.Key(), newTs.Key()) + + // Save the recorded DealStates for the tipsets + mc.oldTsk = oldTs.Key() + mc.newTsk = newTs.Key() + mc.oldDealStateRoot = oldDealStateRootSaved + mc.newDealStateRoot = newDealStateRootSaved + + return matched, data, err + } + return match +} diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go new file mode 100644 index 000000000..d0c5277d5 --- /dev/null +++ b/markets/storageadapter/dealstatematcher_test.go @@ -0,0 +1,157 @@ +package storageadapter + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/chain/events" + "golang.org/x/sync/errgroup" + + cbornode "github.com/ipfs/go-ipld-cbor" + + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + bstore "github.com/filecoin-project/lotus/lib/blockstore" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/events/state" + "github.com/filecoin-project/lotus/chain/types" +) + +func TestDealStateMatcher(t *testing.T) { + ctx := context.Background() + bs := bstore.NewTemporarySync() + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) + + deal1 := &market2.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + } + deal2 := &market2.DealState{ + SectorStartEpoch: 4, + LastUpdatedEpoch: 5, + } + deal3 := &market2.DealState{ + SectorStartEpoch: 7, + LastUpdatedEpoch: 8, + } 
+ deals1 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal1, + } + deals2 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal2, + } + deals3 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal3, + } + + deal1StateC := createMarketState(ctx, t, store, deals1) + deal2StateC := createMarketState(ctx, t, store, deals2) + deal3StateC := createMarketState(ctx, t, store, deals3) + + minerAddr, err := address.NewFromString("t00") + require.NoError(t, err) + ts1, err := test.MockTipset(minerAddr, 1) + require.NoError(t, err) + ts2, err := test.MockTipset(minerAddr, 2) + require.NoError(t, err) + ts3, err := test.MockTipset(minerAddr, 3) + require.NoError(t, err) + + api := test.NewMockAPI(bs) + api.SetActor(ts1.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal1StateC}) + api.SetActor(ts2.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal2StateC}) + api.SetActor(ts3.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal3StateC}) + + t.Run("caching", func(t *testing.T) { + dsm := newDealStateMatcher(state.NewStatePredicates(api)) + matcher := dsm.matcher(ctx, abi.DealID(1)) + + // Call matcher with tipsets that have the same state + ok, stateChange, err := matcher(ts1, ts1) + require.NoError(t, err) + require.False(t, ok) + require.Nil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) + + // Call matcher with tipsets that have different state + api.ResetCallCounts() + ok, stateChange, err = matcher(ts1, ts2) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) + + // Call matcher again with the same tipsets as above, should be cached + api.ResetCallCounts() + ok, stateChange, err = matcher(ts1, ts2) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should not call StateGetActor (because it should hit the cache) + require.Equal(t, 0, api.StateGetActorCallCount()) + + // Call matcher with different tipsets, should not be cached + api.ResetCallCounts() + ok, stateChange, err = matcher(ts2, ts3) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) + }) + + t.Run("parallel", func(t *testing.T) { + api.ResetCallCounts() + dsm := newDealStateMatcher(state.NewStatePredicates(api)) + matcher := dsm.matcher(ctx, abi.DealID(1)) + + // Call matcher with lots of go-routines in parallel + var eg errgroup.Group + res := make([]struct { + ok bool + stateChange events.StateChange + }, 20) + for i := 0; i < len(res); i++ { + i := i + eg.Go(func() error { + ok, stateChange, err := matcher(ts1, ts2) + res[i].ok = ok + res[i].stateChange = stateChange + return err + }) + } + err := eg.Wait() + require.NoError(t, err) + + // All go-routines should have got the same (cached) result + for i := 1; i < len(res); i++ { + require.Equal(t, res[i].ok, res[i-1].ok) + require.Equal(t, res[i].stateChange, res[i-1].stateChange) + } + + // Only one go-routine should have called StateGetActor + // (once for each tipset) + require.Equal(t, 2, api.StateGetActorCallCount()) + }) +} + +func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid { + dealRootCid := test.CreateDealAMT(ctx, t, store, deals) + state := 
test.CreateEmptyMarketState(t, store) + state.States = dealRootCid + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index b1071adcd..82c3a278f 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -12,8 +12,7 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/shared" @@ -52,6 +51,7 @@ type ProviderNodeAdapter struct { ev *events.Events publishSpec, addBalanceSpec *api.MessageSendSpec + dsMatcher *dealStateMatcher } func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { @@ -59,9 +59,10 @@ func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDA na := &ProviderNodeAdapter{ FullNode: full, - dag: dag, - secb: secb, - ev: events.NewEvents(context.TODO(), full), + dag: dag, + secb: secb, + ev: events.NewEvents(context.TODO(), full), + dsMatcher: newDealStateMatcher(state.NewStatePredicates(full)), } if fc != nil { na.publishSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxPublishDealsFee)} @@ -79,8 +80,8 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark return cid.Undef, err } - params, err := actors.SerializeParams(&market0.PublishStorageDealsParams{ - Deals: []market0.ClientDealProposal{deal.ClientDealProposal}, + params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{ + Deals: []market2.ClientDealProposal{deal.ClientDealProposal}, }) if err != nil { @@ -92,7 +93,7 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark To: market.Address, From: mi.Worker, Value: types.NewInt(0), - Method: builtin0.MethodsMarket.PublishStorageDeals, + Method: market.Methods.PublishStorageDeals, Params: params, }, n.publishSpec) if err != nil { @@ -192,7 +193,7 @@ func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address To: market.Address, From: addr, Value: amount, - Method: builtin0.MethodsMarket.AddBalance, + Method: market.Methods.AddBalance, }, n.addBalanceSpec) if err != nil { return cid.Undef, err @@ -308,44 +309,44 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide var sectorNumber abi.SectorNumber var sectorFound bool - matchEvent := func(msg *types.Message) (matchOnce bool, matched bool, err error) { + matchEvent := func(msg *types.Message) (matched bool, err error) { if msg.To != provider { - return true, false, nil + return false, nil } switch msg.Method { - case builtin0.MethodsMiner.PreCommitSector: + case miner.Methods.PreCommitSector: var params miner.SectorPreCommitInfo if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("unmarshal pre commit: %w", err) + return false, xerrors.Errorf("unmarshal pre commit: %w", err) } for _, did := range params.DealIDs { if did == dealID { sectorNumber = params.SectorNumber sectorFound = true - return true, false, nil + return false, nil } } - return true, false, nil - case builtin0.MethodsMiner.ProveCommitSector: + return false, nil + case 
miner.Methods.ProveCommitSector: var params miner.ProveCommitSectorParams if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) } if !sectorFound { - return true, false, nil + return false, nil } if params.SectorNumber != sectorNumber { - return true, false, nil + return false, nil } - return false, true, nil + return true, nil default: - return true, false, nil + return false, nil } } @@ -462,13 +463,7 @@ func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID } // Watch for state changes to the deal - preds := state.NewStatePredicates(n) - dealDiff := preds.OnStorageMarketActorChanged( - preds.OnDealStateChanged( - preds.DealStateChangedForIDs([]abi.DealID{dealID}))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiff(ctx, oldTs.Key(), newTs.Key()) - } + match := n.dsMatcher.matcher(ctx, dealID) // Wait until after the end epoch for the deal and then timeout timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 diff --git a/metrics/metrics.go b/metrics/metrics.go index 5dd865263..33d9e9174 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "time" "go.opencensus.io/stats" @@ -24,6 +25,8 @@ var ( MessageTo, _ = tag.NewKey("message_to") MessageNonce, _ = tag.NewKey("message_nonce") ReceivedFrom, _ = tag.NewKey("received_from") + Endpoint, _ = tag.NewKey("endpoint") + APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls ) // Measures @@ -49,6 +52,7 @@ var ( PubsubRecvRPC = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless) PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless) PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless) + APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) ) var ( @@ -137,6 +141,11 @@ var ( Measure: PubsubDropRPC, Aggregation: view.Count(), } + APIRequestDurationView = &view.View{ + Measure: APIRequestDuration, + Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{APIInterface, Endpoint}, + } ) // DefaultViews is an array of OpenCensus views for metric gathering purposes @@ -161,6 +170,7 @@ var DefaultViews = append([]*view.View{ PubsubRecvRPCView, PubsubSendRPCView, PubsubDropRPCView, + APIRequestDurationView, }, rpcmetrics.DefaultViews...) @@ -168,3 +178,12 @@ var DefaultViews = append([]*view.View{ func SinceInMilliseconds(startTime time.Time) float64 { return float64(time.Since(startTime).Nanoseconds()) / 1e6 } + +// Timer is a function stopwatch, calling it starts the timer, +// calling the returned function will record the duration. 
+func Timer(ctx context.Context, m *stats.Float64Measure) func() { + start := time.Now() + return func() { + stats.Record(ctx, m.M(SinceInMilliseconds(start))) + } +} diff --git a/metrics/proxy.go b/metrics/proxy.go new file mode 100644 index 000000000..f3714ec2e --- /dev/null +++ b/metrics/proxy.go @@ -0,0 +1,65 @@ +package metrics + +import ( + "context" + "reflect" + + "go.opencensus.io/tag" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/apistruct" +) + +func MetricedStorMinerAPI(a api.StorageMiner) api.StorageMiner { + var out apistruct.StorageMinerStruct + proxy(a, &out.Internal) + proxy(a, &out.CommonStruct.Internal) + return &out +} + +func MetricedFullAPI(a api.FullNode) api.FullNode { + var out apistruct.FullNodeStruct + proxy(a, &out.Internal) + proxy(a, &out.CommonStruct.Internal) + return &out +} + +func MetricedWorkerAPI(a api.WorkerAPI) api.WorkerAPI { + var out apistruct.WorkerStruct + proxy(a, &out.Internal) + return &out +} + +func MetricedWalletAPI(a api.WalletAPI) api.WalletAPI { + var out apistruct.WalletStruct + proxy(a, &out.Internal) + return &out +} + +func MetricedGatewayAPI(a api.GatewayAPI) api.GatewayAPI { + var out apistruct.GatewayStruct + proxy(a, &out.Internal) + return &out +} + +func proxy(in interface{}, out interface{}) { + rint := reflect.ValueOf(out).Elem() + ra := reflect.ValueOf(in) + + for f := 0; f < rint.NumField(); f++ { + field := rint.Type().Field(f) + fn := ra.MethodByName(field.Name) + + rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { + ctx := args[0].Interface().(context.Context) + // upsert function name into context + ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name)) + stop := Timer(ctx, APIRequestDuration) + defer stop() + // pass tagged ctx back into function call + args[0] = reflect.ValueOf(ctx) + return fn.Call(args) + })) + + } +} diff --git a/miner/miner.go b/miner/miner.go index 73985a649..f2468a911 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -9,7 +9,7 @@ import ( "sync" "time" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/chain/gen/slashfilter" @@ -280,7 +280,7 @@ minerLoop: m.minedBlockHeights.Add(blkKey, true) if err := m.api.SyncSubmitBlock(ctx, b); err != nil { - log.Errorf("failed to submit newly mined block: %s", err) + log.Errorf("failed to submit newly mined block: %+v", err) } } else { base.NullRounds++ @@ -386,7 +386,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, rbase = bvals[len(bvals)-1] } - ticket, err := m.computeTicket(ctx, &rbase, base) + ticket, err := m.computeTicket(ctx, &rbase, base, mbi) if err != nil { return nil, xerrors.Errorf("scratching ticket failed: %w", err) } @@ -456,16 +456,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, return b, nil } -func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase) (*types.Ticket, error) { - mi, err := m.api.StateMinerInfo(ctx, m.address, types.EmptyTSK) - if err != nil { - return nil, err - } - worker, err := m.api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) - if err != nil { - return nil, err - } - +func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, mbi *api.MiningBaseInfo) (*types.Ticket, error) { buf := new(bytes.Buffer) if err := m.address.MarshalCBOR(buf); err != nil 
{ return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err) @@ -481,7 +472,7 @@ func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, bas return nil, err } - vrfOut, err := gen.ComputeVRF(ctx, m.api.WalletSign, worker, input) + vrfOut, err := gen.ComputeVRF(ctx, m.api.WalletSign, mbi.WorkerKey, input) if err != nil { return nil, err } @@ -492,7 +483,7 @@ func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, bas } func (m *Miner) createBlock(base *MiningBase, addr address.Address, ticket *types.Ticket, - eproof *types.ElectionProof, bvals []types.BeaconEntry, wpostProof []proof0.PoStProof, msgs []*types.SignedMessage) (*types.BlockMsg, error) { + eproof *types.ElectionProof, bvals []types.BeaconEntry, wpostProof []proof2.PoStProof, msgs []*types.SignedMessage) (*types.BlockMsg, error) { uts := base.TipSet.MinTimestamp() + build.BlockDelaySecs*(uint64(base.NullRounds)+1) nheight := base.TipSet.Height() + base.NullRounds + 1 diff --git a/node/builder.go b/node/builder.go index b91172386..05409df04 100644 --- a/node/builder.go +++ b/node/builder.go @@ -44,10 +44,12 @@ import ( "github.com/filecoin-project/lotus/chain/metrics" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" "github.com/filecoin-project/lotus/chain/wallet/remotewallet" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/blockstore" @@ -249,10 +251,9 @@ func Online() Option { Override(new(*store.ChainStore), modules.ChainStore), Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), Override(new(*stmgr.StateManager), stmgr.NewStateManagerWithUpgradeSchedule), - Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), Override(new(*wallet.LocalWallet), wallet.NewWallet), - Override(new(api.WalletAPI), From(new(*wallet.LocalWallet))), Override(new(wallet.Default), From(new(*wallet.LocalWallet))), + Override(new(api.WalletAPI), From(new(wallet.MultiWallet))), Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker), @@ -267,6 +268,7 @@ func Online() Option { Override(new(*chain.Syncer), modules.NewSyncer), Override(new(exchange.Client), exchange.NewClient), Override(new(*messagepool.MessagePool), modules.MessagePool), + Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), Override(new(modules.Genesis), modules.ErrorGenesis), Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), @@ -340,6 +342,7 @@ func Online() Option { Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), + Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), @@ -353,7 +356,8 @@ func Online() Option { Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), 
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(dtypes.DealFilter), modules.BasicDealFilter(nil)), + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds), Override(new(storagemarket.StorageProvider), modules.StorageProvider), Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)), @@ -451,7 +455,7 @@ func ConfigFullNode(c interface{}) Option { return Options( ConfigCommon(&cfg.Common), If(cfg.Client.UseIpfs, - Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr)), + Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), If(cfg.Client.IpfsUseForRetrieval, Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), ), @@ -459,8 +463,16 @@ func ConfigFullNode(c interface{}) Option { If(cfg.Metrics.HeadNotifs, Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), ), + If(cfg.Wallet.RemoteBackend != "", - Override(new(api.WalletAPI), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), + Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), + ), + If(cfg.Wallet.EnableLedger, + Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), + ), + If(cfg.Wallet.DisableLocal, + Unset(new(*wallet.LocalWallet)), + Override(new(wallet.Default), wallet.NilDefault), ), ) } @@ -475,7 +487,11 @@ func ConfigStorageMiner(c interface{}) Option { ConfigCommon(&cfg.Common), If(cfg.Dealmaking.Filter != "", - Override(new(dtypes.DealFilter), modules.BasicDealFilter(dealfilter.CliDealFilter(cfg.Dealmaking.Filter))), + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), + ), + + If(cfg.Dealmaking.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), ), Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees)), diff --git a/node/config/def.go b/node/config/def.go index 63340cfd5..3e109e84a 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -23,6 +23,7 @@ type FullNode struct { Client Client Metrics Metrics Wallet Wallet + Fees FeeConfig } // // Common @@ -45,7 +46,8 @@ type DealmakingConfig struct { PieceCidBlocklist []cid.Cid ExpectedSealDuration Duration - Filter string + Filter string + RetrievalFilter string } type SealingConfig struct { @@ -104,12 +106,19 @@ type Metrics struct { type Client struct { UseIpfs bool + IpfsOnlineMode bool IpfsMAddr string IpfsUseForRetrieval bool } type Wallet struct { RemoteBackend string + EnableLedger bool + DisableLocal bool +} + +type FeeConfig struct { + DefaultMaxFee types.FIL } func defCommon() Common { @@ -139,10 +148,15 @@ func defCommon() Common { } +var DefaultDefaultMaxFee = types.MustParseFIL("0.007") + // DefaultFullNode returns the default config func DefaultFullNode() *FullNode { return &FullNode{ Common: defCommon(), + Fees: FeeConfig{ + DefaultMaxFee: DefaultDefaultMaxFee, + }, } } @@ -180,11 +194,11 @@ func DefaultStorageMiner() *StorageMiner { }, Fees: MinerFeeConfig{ - MaxPreCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), 
types.NewInt(20))), // 0.05 - MaxCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))), - MaxWindowPoStGasFee: types.FIL(types.FromFil(50)), - MaxPublishDealsFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(33))), // 0.03ish - MaxMarketBalanceAddFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(100))), // 0.01 + MaxPreCommitGasFee: types.MustParseFIL("0.025"), + MaxCommitGasFee: types.MustParseFIL("0.05"), + MaxWindowPoStGasFee: types.MustParseFIL("5"), + MaxPublishDealsFee: types.MustParseFIL("0.05"), + MaxMarketBalanceAddFee: types.MustParseFIL("0.007"), }, } cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" diff --git a/node/impl/client/client.go b/node/impl/client/client.go index fe83fda7b..1e3374950 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -1,17 +1,17 @@ package client import ( + "bufio" "context" "fmt" "io" "os" - "github.com/filecoin-project/go-state-types/dline" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-state-types/big" "golang.org/x/xerrors" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil" @@ -33,6 +33,7 @@ import ( "go.uber.org/fx" "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/discovery" "github.com/filecoin-project/go-fil-markets/pieceio" "github.com/filecoin-project/go-fil-markets/retrievalmarket" @@ -40,7 +41,6 @@ import ( "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -50,6 +50,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/commp" "github.com/filecoin-project/lotus/markets/utils" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/paych" @@ -65,9 +66,9 @@ type API struct { fx.In full.ChainAPI - full.StateAPI full.WalletAPI paych.PaychAPI + full.StateAPI SMDealClient storagemarket.StorageClient RetDiscovery discovery.PeerResolver @@ -117,7 +118,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) } } - walletKey, err := a.StateAPI.StateManager.ResolveToKeyAddress(ctx, params.Wallet, nil) + walletKey, err := a.StateAccountKey(ctx, params.Wallet, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("failed resolving params.Wallet addr: %w", params.Wallet) } @@ -709,6 +710,24 @@ func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, e }, nil } +func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) { + dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore))) + + w := &commp.Writer{} + bw := bufio.NewWriterSize(w, int(commp.CommPBuf)) + + err := car.WriteCar(ctx, dag, []cid.Cid{root}, w) + if err != nil { + return api.DataCIDSize{}, err + } + + if err := bw.Flush(); err != nil { + return api.DataCIDSize{}, err + } + + return w.Sum() +} + func (a *API) 
ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error { id, st, err := a.imgr().NewStore() if err != nil { @@ -850,6 +869,22 @@ func (a *API) ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTra return channels, nil } +func (a *API) ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + selfPeer := a.Host.ID() + if isInitiator { + return a.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) + } + return a.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) +} + +func (a *API) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + selfPeer := a.Host.ID() + if isInitiator { + return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) + } + return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) +} + func newDealInfo(v storagemarket.ClientDeal) api.DealInfo { return api.DealInfo{ ProposalCid: v.ProposalCid, @@ -870,3 +905,12 @@ func newDealInfo(v storagemarket.ClientDeal) api.DealInfo { func (a *API) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { return a.Retrieval.TryRestartInsufficientFunds(paymentChannel) } + +func (a *API) ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) { + ststr, ok := storagemarket.DealStates[statusCode] + if !ok { + return "", fmt.Errorf("no such deal state %d", statusCode) + } + + return ststr, nil +} diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go new file mode 100644 index 000000000..da13c8ef3 --- /dev/null +++ b/node/impl/client/client_test.go @@ -0,0 +1 @@ +package client diff --git a/node/impl/common/common.go b/node/impl/common/common.go index da7cfff25..79478e489 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -5,9 +5,9 @@ import ( "sort" "strings" - logging "github.com/ipfs/go-log/v2" - "github.com/gbrlsnchs/jwt/v3" + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/host" metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" @@ -27,6 +27,8 @@ import ( "github.com/filecoin-project/lotus/node/modules/lp2p" ) +var session = uuid.New() + type CommonAPI struct { fx.In @@ -202,6 +204,10 @@ func (a *CommonAPI) Shutdown(ctx context.Context) error { return nil } +func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) { + return session, nil +} + func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) { return make(chan struct{}), nil // relies on jsonrpc closing } diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index 5b4f41114..a3410b8db 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -40,9 +40,14 @@ import ( var log = logging.Logger("fullnode") type ChainModuleAPI interface { + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(context.Context) (*types.TipSet, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) 
ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) } // ChainModule provides a default implementation of ChainModuleAPI. @@ -65,8 +70,8 @@ type ChainAPI struct { Chain *store.ChainStore } -func (a *ChainAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return a.Chain.SubHeadChanges(ctx), nil +func (m *ChainModule) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { + return m.Chain.SubHeadChanges(ctx), nil } func (m *ChainModule) ChainHead(context.Context) (*types.TipSet, error) { @@ -99,13 +104,13 @@ func (m *ChainModule) ChainGetTipSet(ctx context.Context, key types.TipSetKey) ( return m.Chain.LoadTipSet(key) } -func (a *ChainAPI) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) { - b, err := a.Chain.GetBlock(msg) +func (m *ChainModule) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) { + b, err := m.Chain.GetBlock(msg) if err != nil { return nil, err } - bmsgs, smsgs, err := a.Chain.MessagesForBlock(b) + bmsgs, smsgs, err := m.Chain.MessagesForBlock(b) if err != nil { return nil, err } @@ -206,8 +211,8 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo return m.Chain.GetTipsetByHeight(ctx, h, ts, true) } -func (a *ChainAPI) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := a.Chain.Blockstore().Get(obj) +func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { + blk, err := m.Chain.Blockstore().Get(obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -219,8 +224,8 @@ func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { return a.Chain.Blockstore().DeleteBlock(obj) } -func (a *ChainAPI) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return a.Chain.Blockstore().Has(obj) +func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { + return m.Chain.Blockstore().Has(obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { @@ -530,8 +535,8 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, }, nil } -func (a *ChainAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - cm, err := a.Chain.GetCMessage(mc) +func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + cm, err := m.Chain.GetCMessage(mc) if err != nil { return nil, err } diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index 0cb1eb084..5d21121ee 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -7,6 +7,7 @@ import ( "sort" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "go.uber.org/fx" "golang.org/x/xerrors" @@ -16,14 +17,13 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + 
"github.com/filecoin-project/lotus/node/modules/dtypes" ) type GasModuleAPI interface { @@ -35,9 +35,10 @@ type GasModuleAPI interface { // Injection (for example with a thin RPC client). type GasModule struct { fx.In - Stmgr *stmgr.StateManager - Chain *store.ChainStore - Mpool *messagepool.MessagePool + Stmgr *stmgr.StateManager + Chain *store.ChainStore + Mpool *messagepool.MessagePool + GetMaxFee dtypes.DefaultMaxFeeFunc } var _ GasModuleAPI = (*GasModule)(nil) @@ -259,7 +260,7 @@ func gasEstimateGasLimit( if !builtin.IsPaymentChannelActor(act.Code) { return res.MsgRct.GasUsed, nil } - if msgIn.Method != builtin0.MethodsPaych.Collect { + if msgIn.Method != paych.Methods.Collect { return res.MsgRct.GasUsed, nil } @@ -277,7 +278,7 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag } if msg.GasPremium == types.EmptyInt || types.BigCmp(msg.GasPremium, types.NewInt(0)) == 0 { - gasPremium, err := m.GasEstimateGasPremium(ctx, 2, msg.From, msg.GasLimit, types.TipSetKey{}) + gasPremium, err := m.GasEstimateGasPremium(ctx, 10, msg.From, msg.GasLimit, types.TipSetKey{}) if err != nil { return nil, xerrors.Errorf("estimating gas price: %w", err) } @@ -292,7 +293,7 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag msg.GasFeeCap = feeCap } - messagepool.CapGasFee(msg, spec.Get().MaxFee) + messagepool.CapGasFee(m.GetMaxFee, msg, spec.Get().MaxFee) return msg, nil } diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index 8ad209f3f..b1e9f94f9 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -187,6 +187,42 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe }) } +func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + var messageCids []cid.Cid + for _, smsg := range smsgs { + smsgCid, err := a.Mpool.Push(smsg) + if err != nil { + return messageCids, err + } + messageCids = append(messageCids, smsgCid) + } + return messageCids, nil +} + +func (a *MpoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + var messageCids []cid.Cid + for _, smsg := range smsgs { + smsgCid, err := a.Mpool.PushUntrusted(smsg) + if err != nil { + return messageCids, err + } + messageCids = append(messageCids, smsgCid) + } + return messageCids, nil +} + +func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) { + var smsgs []*types.SignedMessage + for _, msg := range msgs { + smsg, err := a.MpoolPushMessage(ctx, msg, spec) + if err != nil { + return smsgs, err + } + smsgs = append(smsgs, smsg) + } + return smsgs, nil +} + func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { return a.Mpool.GetNonce(addr) } diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index d840620eb..9c5f683c4 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -12,8 +12,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/types" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" "github.com/ipfs/go-cid" "go.uber.org/fx" @@ -85,7 +84,7 @@ func (a *MsigAPI) MsigAddPropose(ctx context.Context, msig address.Address, 
src return cid.Undef, actErr } - return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc) + return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { @@ -94,7 +93,7 @@ func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src return cid.Undef, actErr } - return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc) + return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { @@ -103,7 +102,7 @@ func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src a return cid.Undef, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc) + return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { @@ -112,7 +111,7 @@ func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src return cid.Undef, actErr } - return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc) + return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { @@ -121,7 +120,7 @@ func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src return cid.Undef, actErr } - return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc) + return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { @@ -130,7 +129,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src return cid.Undef, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc) + return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { @@ -151,7 +150,7 @@ func (a *MsigAPI) MsigRemoveSigner(ctx context.Context, msig address.Address, pr return cid.Undef, actErr } - return a.MsigPropose(ctx, msig, msig, types.NewInt(0), proposer, uint64(builtin0.MethodsMultisig.RemoveSigner), enc) + return a.MsigPropose(ctx, msig, msig, types.NewInt(0), proposer, uint64(multisig.Methods.RemoveSigner), enc) } func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, src 
address.Address) (cid.Cid, error) { @@ -242,7 +241,7 @@ func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api. } func serializeAddParams(new address.Address, inc bool) ([]byte, error) { - enc, actErr := actors.SerializeParams(&multisig0.AddSignerParams{ + enc, actErr := actors.SerializeParams(&multisig2.AddSignerParams{ Signer: new, Increase: inc, }) @@ -254,7 +253,7 @@ func serializeAddParams(new address.Address, inc bool) ([]byte, error) { } func serializeSwapParams(old address.Address, new address.Address) ([]byte, error) { - enc, actErr := actors.SerializeParams(&multisig0.SwapSignerParams{ + enc, actErr := actors.SerializeParams(&multisig2.SwapSignerParams{ From: old, To: new, }) @@ -266,7 +265,7 @@ func serializeSwapParams(old address.Address, new address.Address) ([]byte, erro } func serializeRemoveParams(rem address.Address, dec bool) ([]byte, error) { - enc, actErr := actors.SerializeParams(&multisig0.RemoveSignerParams{ + enc, actErr := actors.SerializeParams(&multisig2.RemoveSignerParams{ Signer: rem, Decrease: dec, }) diff --git a/node/impl/full/state.go b/node/impl/full/state.go index db91433aa..126ff0d7b 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -40,12 +40,22 @@ import ( ) type StateModuleAPI interface { - StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) + StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) } // StateModule provides a default implementation of StateModuleAPI. 
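Several read-only State methods move from the concrete StateAPI receiver onto StateModule in the hunks below, and StateModuleAPI grows to cover them. The point of the split is that callers can depend on the narrow interface and be wired up either with the in-process implementation or with a thin RPC client that proxies to a gateway. The sketch below shows that wiring in miniature; localState, gatewayState and reportMinerCount are illustrative names, not lotus types.

// A minimal sketch of the pattern behind StateModuleAPI: callers depend on a
// narrow interface rather than the concrete StateAPI struct, so a full node
// can inject its local implementation while a lite setup injects a client
// backed by a remote gateway. All names here are illustrative only.
package main

import (
	"context"
	"fmt"
)

// stateReader is a cut-down stand-in for StateModuleAPI.
type stateReader interface {
	StateListMiners(ctx context.Context) ([]string, error)
}

// localState stands in for the in-process StateModule on a full node.
type localState struct{ miners []string }

func (l localState) StateListMiners(ctx context.Context) ([]string, error) {
	return l.miners, nil
}

// gatewayState stands in for a thin RPC client used against a gateway.
type gatewayState struct{ endpoint string }

func (g gatewayState) StateListMiners(ctx context.Context) ([]string, error) {
	// A real client would issue an RPC to g.endpoint here.
	return []string{"t01000 (via " + g.endpoint + ")"}, nil
}

// reportMinerCount only needs the interface, so it works in both modes.
func reportMinerCount(ctx context.Context, s stateReader) error {
	miners, err := s.StateListMiners(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d miners: %v\n", len(miners), miners)
	return nil
}

func main() {
	ctx := context.Background()
	_ = reportMinerCount(ctx, localState{miners: []string{"t01000", "t01001"}})
	_ = reportMinerCount(ctx, gatewayState{endpoint: "wss://gateway.example"})
}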
@@ -112,13 +122,13 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, maddr, &activeSectors) } -func (a *StateAPI) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - act, err := a.StateManager.LoadActorTsk(ctx, actor, tsk) +func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + act, err := m.StateManager.LoadActorTsk(ctx, actor, tsk) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -215,18 +225,18 @@ func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, return out, err } -func (a *StateAPI) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - ts, err := a.StateManager.ChainStore().GetTipSetFromKey(tsk) +func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - act, err := a.StateManager.LoadActor(ctx, addr, ts) + act, err := m.StateManager.LoadActor(ctx, addr, ts) if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -310,19 +320,19 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres return miner.AllPartSectors(mas, miner.Partition.RecoveringSectors) } -func (a *StateAPI) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - m, net, hmp, err := stmgr.GetPower(ctx, a.StateManager, ts, addr) + mp, net, hmp, err := stmgr.GetPower(ctx, m.StateManager, ts, addr) if err != nil { return nil, err } return &api.MinerPower{ - MinerPower: m, + MinerPower: mp, TotalPower: net, HasMinPower: hmp, }, nil @@ -347,11 +357,37 @@ func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types. 
} func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) - if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + msgToReplay := mc + var ts *types.TipSet + var err error + if tsk == types.EmptyTSK { + mlkp, err := a.StateSearchMsg(ctx, mc) + if err != nil { + return nil, xerrors.Errorf("searching for msg %s: %w", mc, err) + } + if mlkp == nil { + return nil, xerrors.Errorf("didn't find msg %s", mc) + } + + msgToReplay = mlkp.Message + + executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err) + } + + ts, err = a.Chain.LoadTipSet(executionTs.Parents()) + if err != nil { + return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err) + } + } else { + ts, err = a.Chain.LoadTipSet(tsk) + if err != nil { + return nil, xerrors.Errorf("loading specified tipset %s: %w", tsk, err) + } } - m, r, err := a.StateManager.Replay(ctx, ts, mc) + + m, r, err := a.StateManager.Replay(ctx, ts, msgToReplay) if err != nil { return nil, err } @@ -362,8 +398,10 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. } return &api.InvocResult{ + MsgCid: msgToReplay, Msg: m, MsgRct: &r.MessageReceipt, + GasCost: stmgr.MakeMsgGasCost(m, r), ExecutionTrace: r.ExecutionTrace, Error: errstr, Duration: r.Duration, @@ -453,6 +491,24 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts }, nil } +func (a *StateAPI) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) { + act, err := a.StateGetActor(ctx, toAddr, tsk) + if err != nil { + return nil, xerrors.Errorf("getting actor: %w", err) + } + + paramType, err := stmgr.GetParamType(act.Code, method) + if err != nil { + return nil, xerrors.Errorf("getting params type: %w", err) + } + + if err = paramType.UnmarshalCBOR(bytes.NewReader(params)); err != nil { + return nil, err + } + + return paramType, nil +} + // This is on StateAPI because miner.Miner requires this, and MinerAPI requires miner.Miner func (a *StateAPI) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { return stmgr.MinerGetBaseInfo(ctx, a.StateManager, a.Beacon, tsk, epoch, maddr, a.ProofVerifier) @@ -535,20 +591,20 @@ func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLoo return nil, nil } -func (a *StateAPI) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return a.StateManager.GetReceipt(ctx, msg, ts) + return m.StateManager.GetReceipt(ctx, msg, ts) } -func (a *StateAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return stmgr.ListMinerActors(ctx, a.StateManager, 
ts) + return stmgr.ListMinerActors(ctx, m.StateManager, ts) } func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { @@ -559,12 +615,12 @@ func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([] return a.StateManager.ListAllActors(ctx, ts) } -func (a *StateAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return api.MarketBalance{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return a.StateManager.MarketBalance(ctx, addr, ts) + return m.StateManager.MarketBalance(ctx, addr, ts) } func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { @@ -648,12 +704,12 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m return out, nil } -func (a *StateAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return stmgr.GetStorageDeal(ctx, a.StateManager, dealId, ts) + return stmgr.GetStorageDeal(ctx, m.StateManager, dealId, ts) } func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) { @@ -759,7 +815,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre return mas.FindSector(sectorNumber) } -func (a *StateAPI) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { +func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { ts, err := a.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) @@ -1049,7 +1105,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err) } - circSupply, err := a.StateCirculatingSupply(ctx, ts.Key()) + circSupply, err := a.StateVMCirculatingSupplyInternal(ctx, ts.Key()) if err != nil { return big.Zero(), xerrors.Errorf("getting circulating supply: %w", err) } @@ -1096,6 +1152,25 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address return types.BigAdd(abal, vested), nil } +func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return false, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + act, err := a.StateManager.LoadActor(ctx, maddr, ts) + if err != nil { + return false, xerrors.Errorf("failed to load miner actor: %w", err) + } + + mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + if err != nil { + return false, xerrors.Errorf("failed to load miner actor state: %w", err) + } + + return mas.IsAllocated(s) +} + // 
StateVerifiedClientStatus returns the data cap for the given address. // Returns zero if there is no entry in the data cap table for the // address. @@ -1130,19 +1205,19 @@ func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address // StateVerifiedClientStatus returns the data cap for the given address. // Returns zero if there is no entry in the data cap table for the // address. -func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - act, err := a.StateGetActor(ctx, verifreg.Address, tsk) +func (m *StateModule) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + act, err := m.StateGetActor(ctx, verifreg.Address, tsk) if err != nil { return nil, err } - aid, err := a.StateLookupID(ctx, addr, tsk) + aid, err := m.StateLookupID(ctx, addr, tsk) if err != nil { log.Warnf("lookup failure %v", err) return nil, err } - vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act) + vrs, err := verifreg.Load(m.StateManager.ChainStore().Store(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load verified registry state: %w", err) } @@ -1177,33 +1252,33 @@ var dealProviderCollateralDen = types.NewInt(100) // StateDealProviderCollateralBounds returns the min and max collateral a storage provider // can issue. It takes the deal size and verified status as parameters. -func (a *StateAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - pact, err := a.StateGetActor(ctx, power.Address, tsk) + pact, err := m.StateGetActor(ctx, power.Address, tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor: %w", err) } - ract, err := a.StateGetActor(ctx, reward.Address, tsk) + ract, err := m.StateGetActor(ctx, reward.Address, tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor: %w", err) } - pst, err := power.Load(a.StateManager.ChainStore().Store(ctx), pact) + pst, err := power.Load(m.StateManager.ChainStore().Store(ctx), pact) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor state: %w", err) } - rst, err := reward.Load(a.StateManager.ChainStore().Store(ctx), ract) + rst, err := reward.Load(m.StateManager.ChainStore().Store(ctx), ract) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor state: %w", err) } - circ, err := a.StateCirculatingSupply(ctx, ts.Key()) + circ, err := stateVMCirculatingSupplyInternal(ctx, ts.Key(), m.Chain, m.StateManager) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("getting total circulating supply: %w", err) } @@ -1224,80 +1299,53 @@ func (a *StateAPI) StateDealProviderCollateralBounds(ctx context.Context, size a powClaim.QualityAdjPower, rewPow, circ.FilCirculating, - a.StateManager.GetNtwkVersion(ctx, ts.Height())) + m.StateManager.GetNtwkVersion(ctx, ts.Height())) return api.DealCollateralBounds{ Min: 
types.BigDiv(types.BigMul(min, dealProviderCollateralNum), dealProviderCollateralDen), Max: max, }, nil } -func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { +func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { ts, err := a.Chain.GetTipSetFromKey(tsk) if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } sTree, err := a.stateForTs(ctx, ts) if err != nil { - return api.CirculatingSupply{}, err + return types.EmptyInt, err } - return a.StateManager.GetCirculatingSupplyDetailed(ctx, ts.Height(), sTree) + return a.StateManager.GetCirculatingSupply(ctx, ts.Height(), sTree) } -func (a *StateAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (a *StateAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + return stateVMCirculatingSupplyInternal(ctx, tsk, a.Chain, a.StateManager) +} +func stateVMCirculatingSupplyInternal( + ctx context.Context, + tsk types.TipSetKey, + cstore *store.ChainStore, + smgr *stmgr.StateManager, +) (api.CirculatingSupply, error) { + ts, err := cstore.GetTipSetFromKey(tsk) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + sTree, err := stateForTs(ctx, ts, cstore, smgr) + if err != nil { + return api.CirculatingSupply{}, err + } + + return smgr.GetVMCirculatingSupplyDetailed(ctx, ts.Height(), sTree) +} + +func (m *StateModule) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return a.StateManager.GetNtwkVersion(ctx, ts.Height()), nil -} - -func (a *StateAPI) StateMsgGasCost(ctx context.Context, inputMsg cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) { - var msg cid.Cid - var ts *types.TipSet - var err error - if tsk != types.EmptyTSK { - msg = inputMsg - ts, err = a.Chain.LoadTipSet(tsk) - if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) - } - } else { - mlkp, err := a.StateSearchMsg(ctx, inputMsg) - if err != nil { - return nil, xerrors.Errorf("searching for msg %s: %w", inputMsg, err) - } - if mlkp == nil { - return nil, xerrors.Errorf("didn't find msg %s", inputMsg) - } - - executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet) - if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err) - } - - ts, err = a.Chain.LoadTipSet(executionTs.Parents()) - if err != nil { - return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err) - } - - msg = mlkp.Message - } - - m, r, err := a.StateManager.Replay(ctx, ts, msg) - if err != nil { - return nil, err - } - - return &api.MsgGasCost{ - Message: msg, - GasUsed: big.NewInt(r.GasUsed), - BaseFeeBurn: r.GasCosts.BaseFeeBurn, - OverEstimationBurn: r.GasCosts.OverEstimationBurn, - MinerPenalty: r.GasCosts.MinerPenalty, - MinerTip: r.GasCosts.MinerTip, - Refund: r.GasCosts.Refund, - TotalCost: big.Sub(m.RequiredFunds(), r.GasCosts.Refund), - }, nil + return m.StateManager.GetNtwkVersion(ctx, ts.Height()), nil } diff --git a/node/impl/paych/paych.go b/node/impl/paych/paych.go index af0a1db15..773a5efab 100644 --- 
a/node/impl/paych/paych.go +++ b/node/impl/paych/paych.go @@ -13,17 +13,12 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" - full "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/paychmgr" ) type PaychAPI struct { fx.In - full.MpoolAPI - full.WalletAPI - full.ChainAPI - PaychMgr *paychmgr.Manager } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 6090e8a58..a58621c97 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -8,8 +8,10 @@ import ( "strconv" "time" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/peer" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -53,6 +55,7 @@ type StorageMinerAPI struct { StorageMgr *sectorstorage.Manager `optional:"true"` IStorageMgr sectorstorage.SectorManager *stores.Index + storiface.WorkerReturn DataTransfer dtypes.ProviderDataTransfer Host host.Host @@ -84,11 +87,11 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) { sm.StorageMgr.ServeHTTP(w, r) } -func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error) { +func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { return sm.StorageMgr.WorkerStats(), nil } -func (sm *StorageMinerAPI) WorkerJobs(ctx context.Context) (map[uint64][]storiface.WorkerJob, error) { +func (sm *StorageMinerAPI) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { return sm.StorageMgr.WorkerJobs(), nil } @@ -293,8 +296,8 @@ func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error return sm.StorageMgr.AddWorker(ctx, w) } -func (sm *StorageMinerAPI) SealingSchedDiag(ctx context.Context) (interface{}, error) { - return sm.StorageMgr.SchedDiag(ctx) +func (sm *StorageMinerAPI) SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) { + return sm.StorageMgr.SchedDiag(ctx, doSched) } func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error { @@ -400,6 +403,22 @@ func (sm *StorageMinerAPI) MarketListDataTransfers(ctx context.Context) ([]api.D return apiChannels, nil } +func (sm *StorageMinerAPI) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + selfPeer := sm.Host.ID() + if isInitiator { + return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) + } + return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) +} + +func (sm *StorageMinerAPI) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { + selfPeer := sm.Host.ID() + if isInitiator { + return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) + } + return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) +} + func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { channels := make(chan api.DataTransferChannel) diff --git a/node/modules/chain.go 
b/node/modules/chain.go index f563b4cdd..d1414b307 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "os" + "time" "github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap/network" @@ -30,6 +31,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/lib/bufbstore" + "github.com/filecoin-project/lotus/lib/timedbs" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" @@ -41,8 +44,15 @@ func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt r bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) bitswapOptions := []bitswap.Option{bitswap.ProvideEnabled(false)} + // Write all incoming bitswap blocks into a temporary blockstore for two + // block times. If they validate, they'll be persisted later. + cache := timedbs.NewTimedCacheBS(2 * time.Duration(build.BlockDelaySecs) * time.Second) + lc.Append(fx.Hook{OnStop: cache.Stop, OnStart: cache.Start}) + + bitswapBs := bufbstore.NewTieredBstore(bs, cache) + // Use just exch.Close(), closing the context is not needed - exch := bitswap.New(mctx, bitswapNetwork, bs, bitswapOptions...) + exch := bitswap.New(mctx, bitswapNetwork, bitswapBs, bitswapOptions...) lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { return exch.Close() @@ -158,14 +168,19 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) } -func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) { +func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, us stmgr.UpgradeSchedule, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) { if !build.Devnet { return "testnetnet", nil } ctx := helpers.LifecycleCtx(mctx, lc) - netName, err := stmgr.GetNetworkName(ctx, stmgr.NewStateManager(cs), cs.GetHeaviestTipSet().ParentState()) + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return "", err + } + + netName, err := stmgr.GetNetworkName(ctx, sm, cs.GetHeaviestTipSet().ParentState()) return netName, err } diff --git a/node/modules/client.go b/node/modules/client.go index d012e4539..f1380bc97 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -91,6 +91,7 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap return nil, err } + dt.OnReady(marketevents.ReadyLogger("client data transfer")) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { return dt.Start(ctx) diff --git a/node/modules/core.go b/node/modules/core.go index a695d8651..259c1ba3a 100644 --- a/node/modules/core.go +++ b/node/modules/core.go @@ -15,11 +15,13 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/addrutil" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -99,9 +101,34 @@ func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, 
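Editor's note: the `ChainBitswap` change layers a short-lived cache in front of the chain blockstore so unvalidated bitswap blocks never hit disk. A minimal restatement of that wiring outside the fx provider, assuming the tiered store satisfies `blockstore.Blockstore` as it does where the hunk uses it:

```go
package sketch

import (
	"time"

	"go.uber.org/fx"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/lib/blockstore"
	"github.com/filecoin-project/lotus/lib/bufbstore"
	"github.com/filecoin-project/lotus/lib/timedbs"
)

// newBitswapBlockstore restates the wiring from ChainBitswap on its own:
// incoming bitswap blocks land in a cache that expires entries after roughly
// two block times, layered in front of the permanent chain blockstore, so
// only validated blocks are persisted later.
func newBitswapBlockstore(lc fx.Lifecycle, persistent blockstore.Blockstore) blockstore.Blockstore {
	cache := timedbs.NewTimedCacheBS(2 * time.Duration(build.BlockDelaySecs) * time.Second)
	lc.Append(fx.Hook{OnStart: cache.Start, OnStop: cache.Stop})
	return bufbstore.NewTieredBstore(persistent, cache)
}
```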
error) { addrs, err := addrutil.ParseAddresses(context.TODO(), d.Config.Relays) if err != nil { log.Errorf("reoslving drand relays addresses: %+v", err) - return res, nil + continue } res = append(res, addrs...) } return res, nil } + +func NewDefaultMaxFeeFunc(r repo.LockedRepo) dtypes.DefaultMaxFeeFunc { + return func() (out abi.TokenAmount, err error) { + err = readNodeCfg(r, func(cfg *config.FullNode) { + out = abi.TokenAmount(cfg.Fees.DefaultMaxFee) + }) + return + } +} + +func readNodeCfg(r repo.LockedRepo, accessor func(node *config.FullNode)) error { + raw, err := r.Config() + if err != nil { + return err + } + + cfg, ok := raw.(*config.FullNode) + if !ok { + return xerrors.New("expected config.FullNode") + } + + accessor(cfg) + + return nil +} diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go index 5bb439b4d..1ef157b7e 100644 --- a/node/modules/dtypes/miner.go +++ b/node/modules/dtypes/miner.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -71,4 +72,5 @@ type SetExpectedSealDurationFunc func(time.Duration) error // too determine how long sealing is expected to take type GetExpectedSealDurationFunc func() (time.Duration, error) -type DealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) +type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) +type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) diff --git a/node/modules/dtypes/mpool.go b/node/modules/dtypes/mpool.go index 1c64449f8..df96b8d0e 100644 --- a/node/modules/dtypes/mpool.go +++ b/node/modules/dtypes/mpool.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" ) type MpoolLocker struct { @@ -33,3 +34,5 @@ func (ml *MpoolLocker) TakeLock(ctx context.Context, a address.Address) (func(), <-lk }, nil } + +type DefaultMaxFeeFunc func() (abi.TokenAmount, error) diff --git a/node/modules/ipfsclient.go b/node/modules/ipfsclient.go index 24e595fdb..a2d5de88d 100644 --- a/node/modules/ipfsclient.go +++ b/node/modules/ipfsclient.go @@ -16,7 +16,7 @@ import ( // If ipfsMaddr is empty, a local IPFS node is assumed considering IPFS_PATH configuration. // If ipfsMaddr is not empty, it will connect to the remote IPFS node with the provided multiaddress. // The flag useForRetrieval indicates if the IPFS node will also be used for storing retrieving deals. 
-func IpfsClientBlockstore(ipfsMaddr string) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { +func IpfsClientBlockstore(ipfsMaddr string, onlineMode bool) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, localStore dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { var err error var ipfsbs blockstore.Blockstore @@ -26,9 +26,9 @@ func IpfsClientBlockstore(ipfsMaddr string) func(helpers.MetricsCtx, fx.Lifecycl if err != nil { return nil, xerrors.Errorf("parsing ipfs multiaddr: %w", err) } - ipfsbs, err = ipfsbstore.NewRemoteIpfsBstore(helpers.LifecycleCtx(mctx, lc), ma) + ipfsbs, err = ipfsbstore.NewRemoteIpfsBstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode) } else { - ipfsbs, err = ipfsbstore.NewIpfsBstore(helpers.LifecycleCtx(mctx, lc)) + ipfsbs, err = ipfsbstore.NewIpfsBstore(helpers.LifecycleCtx(mctx, lc), onlineMode) } if err != nil { return nil, xerrors.Errorf("constructing ipfs blockstore: %w", err) diff --git a/node/modules/lp2p/libp2p.go b/node/modules/lp2p/libp2p.go index 5a1666cb6..51749c4d6 100644 --- a/node/modules/lp2p/libp2p.go +++ b/node/modules/lp2p/libp2p.go @@ -20,8 +20,8 @@ import ( var log = logging.Logger("p2pnode") const ( - KLibp2pHost = "libp2p-host" - KTLibp2pHost = KLibp2pHost + KLibp2pHost = "libp2p-host" + KTLibp2pHost types.KeyType = KLibp2pHost ) type Libp2pOpts struct { diff --git a/node/modules/rpcstatemanager.go b/node/modules/rpcstatemanager.go index 0ed054d45..7d7b92437 100644 --- a/node/modules/rpcstatemanager.go +++ b/node/modules/rpcstatemanager.go @@ -3,19 +3,40 @@ package modules import ( "context" - "github.com/filecoin-project/lotus/api" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + cbor "github.com/ipfs/go-ipld-cbor" ) type RPCStateManager struct { - gapi api.GatewayAPI + gapi api.GatewayAPI + cstore *cbor.BasicIpldStore } func NewRPCStateManager(api api.GatewayAPI) *RPCStateManager { - return &RPCStateManager{gapi: api} + cstore := cbor.NewCborStore(apibstore.NewAPIBlockstore(api)) + return &RPCStateManager{gapi: api, cstore: cstore} +} + +func (s *RPCStateManager) GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) { + act, err := s.gapi.StateGetActor(ctx, addr, ts.Key()) + if err != nil { + return nil, nil, err + } + + actState, err := paych.Load(adt.WrapStore(ctx, s.cstore), act) + if err != nil { + return nil, nil, err + } + return act, actState, nil + } func (s *RPCStateManager) LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { @@ -30,4 +51,8 @@ func (s *RPCStateManager) ResolveToKeyAddress(ctx context.Context, addr address. 
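Editor's note: the `lp2p` hunk turns `KTLibp2pHost` into a typed constant. A tiny sketch of the pattern with a local stand-in type (`KeyType` here is illustrative; the real constant uses `types.KeyType`):

```go
package sketch

// KeyType is a local stand-in for types.KeyType from the hunk; the point of
// the change is the same: give the keystore name a distinct type so a call
// site expecting a KeyType cannot silently be handed an arbitrary string.
type KeyType string

const (
	KLibp2pHost          = "libp2p-host" // untyped, kept for existing string call sites
	KTLibp2pHost KeyType = KLibp2pHost   // typed constant used where a KeyType is required
)
```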
return s.gapi.StateAccountKey(ctx, addr, ts.Key()) } +func (s *RPCStateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) { + return nil, xerrors.Errorf("RPCStateManager does not implement StateManager.Call") +} + var _ stmgr.StateManagerAPI = (*RPCStateManager)(nil) diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 9011c4821..e9f5db008 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -43,23 +43,24 @@ import ( "github.com/filecoin-project/go-multistore" paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" - "github.com/filecoin-project/specs-actors/actors/builtin" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/markets" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/markets/retrievaladapter" "github.com/filecoin-project/lotus/miner" @@ -186,22 +187,12 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st ctx := helpers.LifecycleCtx(mctx, lc) - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + fps, err := storage.NewWindowedPoStScheduler(api, fc, sealer, sealer, j, maddr) if err != nil { return nil, err } - worker, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) - if err != nil { - return nil, err - } - - fps, err := storage.NewWindowedPoStScheduler(api, fc, sealer, sealer, j, maddr, worker) - if err != nil { - return nil, err - } - - sm, err := storage.NewMiner(api, maddr, worker, h, ds, sealer, sc, verif, gsd, fc, j) + sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j) if err != nil { return nil, err } @@ -267,6 +258,7 @@ func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.S return nil, err } + dt.OnReady(marketevents.ReadyLogger("provider data transfer")) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { return dt.Start(ctx) @@ -392,13 +384,11 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat if err != nil { return nil, err } - storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress)) + storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress), + storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) if err != nil { return nil, err } - // Hacky way to set max piece size to the sector size - a := 
storedAsk.GetAsk().Ask - err = storedAsk.SetAsk(a.Price, a.VerifiedPrice, a.Expiry-a.Timestamp, storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) if err != nil { return storedAsk, err } @@ -411,16 +401,16 @@ func NewProviderDealFunds(ds dtypes.MetadataDS) (ProviderDealFunds, error) { return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/provider")) } -func BasicDealFilter(user dtypes.DealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, +func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, - spn storagemarket.StorageProviderNode) dtypes.DealFilter { + spn storagemarket.StorageProviderNode) dtypes.StorageDealFilter { return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, - spn storagemarket.StorageProviderNode) dtypes.DealFilter { + spn storagemarket.StorageProviderNode) dtypes.StorageDealFilter { return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { b, err := onlineOk() @@ -473,7 +463,7 @@ func BasicDealFilter(user dtypes.DealFilter) func(onlineOk dtypes.ConsiderOnline // Reject if it's more than 7 days in the future // TODO: read from cfg - maxStartEpoch := ht + abi.ChainEpoch(7*builtin.EpochsInDay) + maxStartEpoch := earliest + abi.ChainEpoch(7*builtin.SecondsInDay/build.BlockDelaySecs) if deal.Proposal.StartEpoch > maxStartEpoch { return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil } @@ -496,7 +486,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, - df dtypes.DealFilter, + df dtypes.StorageDealFilter, funds ProviderDealFunds, ) (storagemarket.StorageProvider, error) { net := smnet.NewFromLibp2pHost(h) @@ -510,8 +500,52 @@ func StorageProvider(minerAddress dtypes.MinerAddress, return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), ffiConfig.SealProofType, storedAsk, funds, opt) } +func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { + return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { + return func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + b, err := onlineOk() + if err != nil { + return false, "miner error", err + } + + if !b { + log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client") + return false, "miner is not accepting online retrieval deals", nil + } + + b, err = offlineOk() + if err != nil { + return false, "miner error", err + } + + if !b { + log.Info("offline retrieval has not been implemented yet") + } + + if userFilter != nil { + return userFilter(ctx, state) + } + + return true, "", nil + } + } +} + // RetrievalProvider creates a new 
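Editor's note: the deal-filter hunk replaces the removed `EpochsInDay` constant with explicit seconds-to-epochs arithmetic. A short worked sketch of that conversion (the 30-second block delay is illustrative only):

```go
package sketch

import "github.com/filecoin-project/go-state-types/abi"

// epochsIn converts a wall-clock window into chain epochs, the same
// arithmetic the deal filter now uses for its start-epoch cutoff
// (7 * SecondsInDay / BlockDelaySecs instead of the removed EpochsInDay
// constant).
func epochsIn(seconds, blockDelaySecs uint64) abi.ChainEpoch {
	return abi.ChainEpoch(seconds / blockDelaySecs)
}

// Example: epochsIn(7*24*60*60, 30) == 20160 epochs for a 7-day window.
```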
retrieval provider attached to the provider blockstore -func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, mds dtypes.StagingMultiDstore, dt dtypes.ProviderDataTransfer, onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) (retrievalmarket.RetrievalProvider, error) { +func RetrievalProvider(h host.Host, + miner *storage.Miner, + sealer sectorstorage.SectorManager, + full lapi.FullNode, + ds dtypes.MetadataDS, + pieceStore dtypes.ProviderPieceStore, + mds dtypes.StagingMultiDstore, + dt dtypes.ProviderDataTransfer, + onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc, + userFilter dtypes.RetrievalDealFilter, +) (retrievalmarket.RetrievalProvider, error) { adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full) maddr, err := minerAddrFromDS(ds) @@ -520,37 +554,21 @@ func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.S } netwk := rmnet.NewFromLibp2pHost(h) - - opt := retrievalimpl.DealDeciderOpt(func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { - b, err := onlineOk() - if err != nil { - return false, "miner error", err - } - - if !b { - log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client") - return false, "miner is not accepting online retrieval deals", nil - } - - b, err = offlineOk() - if err != nil { - return false, "miner error", err - } - - if !b { - log.Info("offline retrieval has not been implemented yet") - } - - return true, "", nil - }) + opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt) } -func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) { +var WorkerCallsPrefix = datastore.NewKey("/worker/calls") +var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") + +func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { ctx := helpers.LifecycleCtx(mctx, lc) - sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa) + wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix)) + smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix)) + + sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts) if err != nil { return nil, err } diff --git a/node/node_test.go b/node/node_test.go index 001b99c04..e553e83b2 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -117,6 +117,16 @@ func TestPledgeSectors(t *testing.T) { }) } +func TestTapeFix(t *testing.T) { + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond) +} + func TestWindowedPost(t *testing.T) { if 
os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") diff --git a/node/repo/fsrepo_ds.go b/node/repo/fsrepo_ds.go index aa91d2514..e7746cb8e 100644 --- a/node/repo/fsrepo_ds.go +++ b/node/repo/fsrepo_ds.go @@ -4,18 +4,27 @@ import ( "os" "path/filepath" - "github.com/ipfs/go-datastore" + dgbadger "github.com/dgraph-io/badger/v2" + ldbopts "github.com/syndtr/goleveldb/leveldb/opt" "golang.org/x/xerrors" - dgbadger "github.com/dgraph-io/badger/v2" + "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" levelds "github.com/ipfs/go-ds-leveldb" measure "github.com/ipfs/go-ds-measure" - ldbopts "github.com/syndtr/goleveldb/leveldb/opt" ) type dsCtor func(path string, readonly bool) (datastore.Batching, error) +func ChainBadgerOptions() badger.Options { + opts := badger.DefaultOptions + opts.GcInterval = 0 // disable GC for chain datastore + + opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). + WithValueThreshold(128) + return opts +} + var fsDatastores = map[string]dsCtor{ "chain": chainBadgerDs, "metadata": levelDs, @@ -27,13 +36,8 @@ var fsDatastores = map[string]dsCtor{ } func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) { - opts := badger.DefaultOptions - opts.GcInterval = 0 // disable GC for chain datastore + opts := ChainBadgerOptions() opts.ReadOnly = readonly - - opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). - WithValueThreshold(1 << 10) - return badger.NewDatastore(path, &opts) } @@ -43,7 +47,6 @@ func badgerDs(path string, readonly bool) (datastore.Batching, error) { opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). WithValueThreshold(1 << 10) - return badger.NewDatastore(path, &opts) } diff --git a/node/test/builder.go b/node/test/builder.go index 4aa8a55ea..ea9a82220 100644 --- a/node/test/builder.go +++ b/node/test/builder.go @@ -24,6 +24,8 @@ import ( "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/types" @@ -33,14 +35,13 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/genesis" - miner2 "github.com/filecoin-project/lotus/miner" + lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules" testing2 "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/mockstorage" - "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" @@ -86,13 +87,13 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr peerid, err := peer.IDFromPrivateKey(pk) require.NoError(t, err) - enc, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) + enc, err := 
actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) require.NoError(t, err) msg := &types.Message{ To: act, From: waddr, - Method: builtin.MethodsMiner.ChangePeerID, + Method: miner.Methods.ChangePeerID, Params: enc, Value: types.NewInt(0), } @@ -103,7 +104,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr // start node var minerapi api.StorageMiner - mineBlock := make(chan miner2.MineReq) + mineBlock := make(chan lotusminer.MineReq) stop, err := node.New(ctx, node.StorageMiner(&minerapi), node.Online(), @@ -113,7 +114,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr node.MockHost(mn), node.Override(new(api.FullNode), tnd), - node.Override(new(*miner2.Miner), miner2.NewTestMiner(mineBlock, act)), + node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)), opts, ) @@ -129,7 +130,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr err = minerapi.NetConnect(ctx, remoteAddrs) require.NoError(t, err)*/ - mineOne := func(ctx context.Context, req miner2.MineReq) error { + mineOne := func(ctx context.Context, req lotusminer.MineReq) error { select { case mineBlock <- req: return nil @@ -444,7 +445,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options( node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) { - return mock.NewMockSectorMgr(build.DefaultSectorSize(), sectors), nil + return mock.NewMockSectorMgr(policy.GetDefaultSectorSize(), sectors), nil }), node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), node.Unset(new(*sectorstorage.Manager)), diff --git a/paychmgr/manager.go b/paychmgr/manager.go index f2fc190c7..5e0aa88ce 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -16,7 +16,6 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" @@ -61,14 +60,10 @@ type managerAPI interface { // managerAPIImpl is used to create a composite that implements managerAPI type managerAPIImpl struct { - *stmgr.StateManager + stmgr.StateManagerAPI paychAPI } -func (m *managerAPIImpl) AdtStore(ctx context.Context) adt.Store { - return m.ChainStore().Store(ctx) -} - type Manager struct { // The Manager context is used to terminate wait operations on shutdown ctx context.Context @@ -82,11 +77,11 @@ type Manager struct { channels map[string]*channelAccessor } -func NewManager(mctx helpers.MetricsCtx, lc fx.Lifecycle, sm *stmgr.StateManager, pchstore *Store, api PaychAPI) *Manager { +func NewManager(mctx helpers.MetricsCtx, lc fx.Lifecycle, sm stmgr.StateManagerAPI, pchstore *Store, api PaychAPI) *Manager { ctx := helpers.LifecycleCtx(mctx, lc) ctx, shutdown := context.WithCancel(ctx) - impl := &managerAPIImpl{StateManager: sm, paychAPI: &api} + impl := &managerAPIImpl{StateManagerAPI: sm, paychAPI: &api} return &Manager{ ctx: ctx, shutdown: shutdown, diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index fcd3d50a8..8557dfb63 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -14,9 +14,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" 
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/specs-actors/actors/builtin" - tutils "github.com/filecoin-project/specs-actors/support/testing" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" @@ -237,7 +237,7 @@ func TestCreateVoucher(t *testing.T) { // Create a voucher in lane 1 voucherLane1Amt := big.NewInt(5) - voucher := paych2.SignedVoucher{ + voucher := paych.SignedVoucher{ Lane: 1, Amount: voucherLane1Amt, } @@ -252,7 +252,7 @@ func TestCreateVoucher(t *testing.T) { // Create a voucher in lane 1 again, with a higher amount voucherLane1Amt = big.NewInt(8) - voucher = paych2.SignedVoucher{ + voucher = paych.SignedVoucher{ Lane: 1, Amount: voucherLane1Amt, } @@ -267,7 +267,7 @@ func TestCreateVoucher(t *testing.T) { // Create a voucher in lane 2 that covers all the remaining funds // in the channel voucherLane2Amt := big.Sub(s.amt, voucherLane1Amt) - voucher = paych2.SignedVoucher{ + voucher = paych.SignedVoucher{ Lane: 2, Amount: voucherLane2Amt, } @@ -281,7 +281,7 @@ func TestCreateVoucher(t *testing.T) { // Create a voucher in lane 2 that exceeds the remaining funds in the // channel voucherLane2Amt = big.Add(voucherLane2Amt, big.NewInt(1)) - voucher = paych2.SignedVoucher{ + voucher = paych.SignedVoucher{ Lane: 2, Amount: voucherLane2Amt, } diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index 9f19dd13d..e6b94db57 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -663,6 +663,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) { defer addFundsSent.Done() // Request add funds - should block until create channel has completed + var err error addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1) require.NoError(t, err) }() @@ -671,6 +672,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) { defer addFundsSent.Done() // Request add funds again - should merge with waiting add funds request + var err error addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2) require.NoError(t, err) }() @@ -766,6 +768,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { defer addFundsSent.Done() // Request add funds again - should merge with waiting add funds request + var err error addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2) require.NoError(t, err) }() @@ -861,7 +864,6 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { // Request add funds again - should merge with waiting add funds request _, _, addFundsErr2 = mgr.GetPaych(addFundsCtx2, from, to, big.NewInt(3)) - require.NoError(t, err) }() // Wait for add funds requests to be queued up waitForQueueSize(t, mgr, from, to, 2) @@ -950,6 +952,7 @@ func TestPaychAvailableFunds(t *testing.T) { defer addFundsSent.Done() // Request add funds - should block until create channel has completed + var err error _, addFundsMcid, err = mgr.GetPaych(ctx, from, to, addFundsAmt) require.NoError(t, err) }() diff --git a/paychmgr/paychvoucherfunds_test.go b/paychmgr/paychvoucherfunds_test.go index dcbb4acc9..f83a7cd62 100644 --- a/paychmgr/paychvoucherfunds_test.go +++ b/paychmgr/paychvoucherfunds_test.go @@ -4,19 +4,19 @@ import ( "context" "testing" - 
"github.com/filecoin-project/lotus/chain/actors/builtin/paych" - paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - tutils "github.com/filecoin-project/specs-actors/support/testing" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/require" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + tutils2 "github.com/filecoin-project/specs-actors/v2/support/testing" + + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock" + "github.com/filecoin-project/lotus/chain/types" ) // TestPaychAddVoucherAfterAddFunds tests adding a voucher to a channel with @@ -27,11 +27,11 @@ func TestPaychAddVoucherAfterAddFunds(t *testing.T) { store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) - ch := tutils.NewIDAddr(t, 100) - from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) - to := tutils.NewSECP256K1Addr(t, "secpTo") - fromAcct := tutils.NewActorAddr(t, "fromAct") - toAcct := tutils.NewActorAddr(t, "toAct") + ch := tutils2.NewIDAddr(t, 100) + from := tutils2.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils2.NewSECP256K1Addr(t, "secpTo") + fromAcct := tutils2.NewActorAddr(t, "fromAct") + toAcct := tutils2.NewActorAddr(t, "toAct") mock := newMockManagerAPI() defer mock.close() @@ -55,7 +55,7 @@ func TestPaychAddVoucherAfterAddFunds(t *testing.T) { // Create an actor in state for the channel with the initial channel balance act := &types.Actor{ - Code: builtin.AccountActorCodeID, + Code: builtin2.AccountActorCodeID, Head: cid.Cid{}, Nonce: 0, Balance: createAmt, diff --git a/paychmgr/settler/settler.go b/paychmgr/settler/settler.go index 02fe9256e..41aaca665 100644 --- a/paychmgr/settler/settler.go +++ b/paychmgr/settler/settler.go @@ -14,8 +14,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" @@ -105,27 +103,27 @@ func (pcs *paymentChannelSettler) revertHandler(ctx context.Context, ts *types.T return nil } -func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matchOnce bool, matched bool, err error) { +func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matched bool, err error) { // Check if this is a settle payment channel message - if msg.Method != builtin0.MethodsPaych.Settle { - return false, false, nil + if msg.Method != paych.Methods.Settle { + return false, nil } // Check if this payment channel is of concern to this node (i.e. tracked in payment channel store), // and its inbound (i.e. 
we're getting vouchers that we may need to redeem) trackedAddresses, err := pcs.api.PaychList(pcs.ctx) if err != nil { - return false, false, err + return false, err } for _, addr := range trackedAddresses { if msg.To == addr { status, err := pcs.api.PaychStatus(pcs.ctx, addr) if err != nil { - return false, false, err + return false, err } if status.Direction == api.PCHInbound { - return false, true, nil + return true, nil } } } - return false, false, nil + return false, nil } diff --git a/paychmgr/simple.go b/paychmgr/simple.go index 253075604..afa1ae1f7 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -101,10 +101,13 @@ type mergedFundsReq struct { func newMergedFundsReq(reqs []*fundsReq) *mergedFundsReq { ctx, cancel := context.WithCancel(context.Background()) + + rqs := make([]*fundsReq, len(reqs)) + copy(rqs, reqs) m := &mergedFundsReq{ ctx: ctx, cancel: cancel, - reqs: reqs, + reqs: rqs, } for _, r := range m.reqs { @@ -201,7 +204,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // Merge all pending requests into one. // For example if there are pending requests for 3, 2, 4 then // amt = 3 + 2 + 4 = 9 - merged := newMergedFundsReq(ca.fundsReqQueue[:]) + merged := newMergedFundsReq(ca.fundsReqQueue) amt := merged.sum() if amt.IsZero() { // Note: The amount can be zero if requests are cancelled as we're diff --git a/scripts/dev/api.bash b/scripts/dev/api.bash index 5539e4fef..ac9eb4e66 100644 --- a/scripts/dev/api.bash +++ b/scripts/dev/api.bash @@ -1,11 +1,11 @@ #!/bin/bash # vim: set expandtab ts=2 sw=2: -token=$(lotus auth create-token --perm admin) +_lotus_token=$(./lotus auth create-token --perm admin) runAPI() { curl -X POST \ -H "Content-Type: application/json" \ --data '{"jsonrpc":"2.0","id":2,"method":"Filecoin.'"$1"'","params":'"${2:-null}"'}' \ - 'http://127.0.0.1:1234/rpc/v0?token='"$token" + 'http://127.0.0.1:1234/rpc/v0?token='"$_lotus_token" } diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 380fb4471..2279a9201 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -15,8 +15,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/apibstore" @@ -94,6 +93,15 @@ func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr addres return s.delegate.StateMinerDeadlines(ctx, maddr, tsk) } +func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, sid abi.SectorNumber, tok sealing.TipSetToken) (bool, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return false, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk) +} + func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) { wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence) if err != nil { @@ -138,7 +146,7 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } - ccparams, err := 
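Editor's note: the `paychmgr/simple.go` change copies the request slice before retaining it. A trimmed-down sketch of the aliasing problem being fixed (the types here are simplified stand-ins for the real ones):

```go
package sketch

// fundsReq and mergedFundsReq are trimmed-down stand-ins for the paychmgr
// types; only the slice handling matters here.
type fundsReq struct{ amt int }

type mergedFundsReq struct{ reqs []*fundsReq }

// newMergedFundsReq copies the pending requests before keeping them, which is
// the point of the hunk: the caller later rewrites its queue slice in place,
// and without the copy the merged request would alias the same backing array
// and see its entries change underneath it.
func newMergedFundsReq(reqs []*fundsReq) *mergedFundsReq {
	rqs := make([]*fundsReq, len(reqs))
	copy(rqs, reqs)
	return &mergedFundsReq{reqs: rqs}
}
```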
actors.SerializeParams(&market0.ComputeDataCommitmentParams{ + ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{ DealIDs: deals, SectorType: sectorType, }) @@ -150,7 +158,7 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr To: market.Address, From: maddr, Value: types.NewInt(0), - Method: builtin0.MethodsMarket.ComputeDataCommitment, + Method: market.Methods.ComputeDataCommitment, Params: ccparams, } r, err := s.delegate.StateCall(ctx, ccmt, tsk) diff --git a/storage/miner.go b/storage/miner.go index 74a048c8e..378c12b84 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -5,9 +5,6 @@ import ( "errors" "time" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/dline" @@ -29,7 +26,9 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" @@ -50,8 +49,7 @@ type Miner struct { sc sealing.SectorIDCounter verif ffiwrapper.Verifier - maddr address.Address - worker address.Address + maddr address.Address getSealConfig dtypes.GetSealingConfigFunc sealing *sealing.Sealing @@ -83,6 +81,7 @@ type storageMinerApi interface { StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) + StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) // TODO: removeme eventually StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) @@ -112,7 +111,7 @@ type storageMinerApi interface { WalletHas(context.Context, address.Address) (bool, error) } -func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal) (*Miner, error) { +func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal) (*Miner, error) { m := &Miner{ api: api, feeCfg: feeCfg, @@ -123,7 +122,6 @@ func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, d verif: verif, maddr: maddr, - worker: worker, getSealConfig: gsd, journal: journal, sealingEvtType: journal.RegisterEventType("storage", "sealing_states"), @@ -150,7 +148,7 @@ func (m *Miner) Run(ctx context.Context) error { evts := events.NewEvents(ctx, m.api) adaptedAPI := 
NewSealingAPIAdapter(m.api) // TODO: Maybe we update this policy after actor upgrades? - pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, miner0.MaxSectorExpirationExtension-(miner0.WPoStProvingPeriod*2), md.PeriodStart%miner0.WPoStProvingPeriod) + pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, policy.GetMaxSectorExpirationExtension()-(md.WPoStProvingPeriod*2), md.PeriodStart%md.WPoStProvingPeriod) m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications) go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function @@ -175,7 +173,17 @@ func (m *Miner) Stop(ctx context.Context) error { } func (m *Miner) runPreflightChecks(ctx context.Context) error { - has, err := m.api.WalletHas(ctx, m.worker) + mi, err := m.api.StateMinerInfo(ctx, m.maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to resolve miner info: %w", err) + } + + workerKey, err := m.api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to resolve worker key: %w", err) + } + + has, err := m.api.WalletHas(ctx, workerKey) if err != nil { return xerrors.Errorf("failed to check wallet for worker key: %w", err) } @@ -184,7 +192,7 @@ func (m *Miner) runPreflightChecks(ctx context.Context) error { return errors.New("key for worker not found in local wallet") } - log.Infof("starting up miner %s, worker addr %s", m.maddr, m.worker) + log.Infof("starting up miner %s, worker addr %s", m.maddr, workerKey) return nil } @@ -238,9 +246,9 @@ func (wpp *StorageWpp) GenerateCandidates(ctx context.Context, randomness abi.Po return cds, nil } -func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []proof0.SectorInfo, rand abi.PoStRandomness) ([]proof0.PoStProof, error) { +func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) { if build.InsecurePoStValidation { - return []proof0.PoStProof{{ProofBytes: []byte("valid proof")}}, nil + return []builtin.PoStProof{{ProofBytes: []byte("valid proof")}}, nil } log.Infof("Computing WinningPoSt ;%+v; %v", ssi, rand) diff --git a/storage/mockstorage/preseal.go b/storage/mockstorage/preseal.go index 8ca789ba6..0417405c8 100644 --- a/storage/mockstorage/preseal.go +++ b/storage/mockstorage/preseal.go @@ -7,10 +7,9 @@ import ( commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/extern/sector-storage/mock" - market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" @@ -20,7 +19,7 @@ import ( ) func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { - k, err := wallet.GenerateKey(crypto.SigTypeBLS) + k, err := wallet.GenerateKey(types.KTBLS) if err != nil { return nil, nil, err } @@ -49,7 +48,7 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis r := mock.CommDR(d) preseal.CommR, _ = commcid.ReplicaCommitmentV1ToCID(r[:]) preseal.SectorID = abi.SectorNumber(i + 1) - preseal.Deal = market0.DealProposal{ + preseal.Deal = market2.DealProposal{ PieceCID: 
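Editor's note: `runPreflightChecks` now resolves the worker key from chain state instead of using a worker address captured at construction, which is the heart of the worker-key fixes in this release. A sketch of that flow against hypothetical, narrowed interfaces (`minerInfo`, `stateAPI`, and `resolveWorkerKey` are illustrative, not the real API):

```go
package sketch

import (
	"context"

	"github.com/filecoin-project/go-address"
	"golang.org/x/xerrors"
)

// minerInfo and stateAPI are hypothetical narrowings of the real
// StateMinerInfo/StateAccountKey calls used by runPreflightChecks.
type minerInfo struct{ Worker address.Address }

type stateAPI interface {
	MinerInfo(ctx context.Context, maddr address.Address) (minerInfo, error)
	AccountKey(ctx context.Context, a address.Address) (address.Address, error)
}

// resolveWorkerKey captures the behavioural change: rather than caching the
// worker address when the miner is constructed, resolve the current worker ID
// address to a key address at startup, so an on-chain worker key change is
// picked up automatically.
func resolveWorkerKey(ctx context.Context, api stateAPI, maddr address.Address) (address.Address, error) {
	mi, err := api.MinerInfo(ctx, maddr)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to resolve miner info: %w", err)
	}
	key, err := api.AccountKey(ctx, mi.Worker)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to resolve worker key: %w", err)
	}
	return key, nil
}
```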
preseal.CommD, PieceSize: abi.PaddedPieceSize(ssize), Client: k.Address, diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index d4ed4d64c..f1da4f221 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -12,19 +12,19 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" "go.opencensus.io/trace" "golang.org/x/xerrors" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" ) @@ -152,7 +152,15 @@ func (s *WindowPoStScheduler) runSubmitPoST( defer span.End() // Get randomness from tickets + // use the challenge epoch if we've upgraded to network version 4 + // (actors version 2). We want to go back as far as possible to be safe. commEpoch := deadline.Open + if ver, err := s.api.StateNetworkVersion(ctx, types.EmptyTSK); err != nil { + log.Errorw("failed to get network version to determine PoSt epoch randomness lookback", "error", err) + } else if ver >= network.Version4 { + commEpoch = deadline.Challenge + } + commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil) if err != nil { err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err) @@ -181,11 +189,6 @@ func (s *WindowPoStScheduler) runSubmitPoST( } func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) { - spt, err := s.proofType.RegisteredSealProof() - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("getting seal proof type: %w", err) - } - mid, err := address.IDFromAddress(s.actor) if err != nil { return bitfield.BitField{}, err @@ -207,7 +210,7 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err) } - bad, err := s.faultTracker.CheckProvable(ctx, spt, tocheck) + bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck) if err != nil { return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) } @@ -289,13 +292,14 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin msg := &types.Message{ To: s.actor, - From: s.worker, - Method: builtin0.MethodsMiner.DeclareFaultsRecovered, + Method: miner.Methods.DeclareFaultsRecovered, Params: enc, Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - s.setSender(ctx, msg, spec) + if err := s.setSender(ctx, msg, spec); err != nil { + return recoveries, nil, err + } sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}) if err != nil { @@ -373,13 +377,14 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, msg := &types.Message{ To: 
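Editor's note: the `wdpost_run.go` hunk gates the PoSt chain-commit randomness epoch on the network version. A small restatement of that rule on its own (the `postCommitEpoch` helper is illustrative):

```go
package sketch

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
	"github.com/filecoin-project/go-state-types/network"
)

// postCommitEpoch restates the new lookback rule on its own: before network
// version 4 (actors v2) the chain-commit randomness is drawn at the
// deadline's Open epoch; from v4 onwards the earlier Challenge epoch is used,
// going back as far as possible to be safe.
func postCommitEpoch(ver network.Version, di dline.Info) abi.ChainEpoch {
	if ver >= network.Version4 {
		return di.Challenge
	}
	return di.Open
}
```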
s.actor, - From: s.worker, - Method: builtin0.MethodsMiner.DeclareFaults, + Method: miner.Methods.DeclareFaults, Params: enc, Value: types.NewInt(0), // TODO: Is there a fee? } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - s.setSender(ctx, msg, spec) + if err := s.setSender(ctx, msg, spec); err != nil { + return faults, nil, err + } sm, err := s.api.MpoolPushMessage(ctx, msg, spec) if err != nil { @@ -505,12 +510,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty skipCount := uint64(0) postSkipped := bitfield.New() - var postOut []proof.PoStProof - somethingToProve := true + somethingToProve := false - for retries := 0; retries < 5; retries++ { + // Retry until we run out of sectors to prove. + for retries := 0; ; retries++ { var partitions []miner.PoStPartition - var sinfos []proof.SectorInfo + var sinfos []proof2.SectorInfo for partIdx, partition := range batch { // TODO: Can do this in parallel toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors) @@ -562,7 +567,6 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty if len(sinfos) == 0 { // nothing to prove for this batch - somethingToProve = false break } @@ -580,27 +584,43 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, err } - var ps []abi.SectorID - postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand)) + postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand)) elapsed := time.Since(tsStart) log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed) if err == nil { - // Proof generation successful, stop retrying - params.Partitions = append(params.Partitions, partitions...) + if len(postOut) == 0 { + return nil, xerrors.Errorf("received no proofs back from generate window post") + } + // Proof generation successful, stop retrying + somethingToProve = true + params.Partitions = partitions + params.Proofs = postOut break } // Proof generation failed, so retry if len(ps) == 0 { + // If we didn't skip any new sectors, we failed + // for some other reason and we need to abort. return nil, xerrors.Errorf("running window post failed: %w", err) } + // TODO: maybe mark these as faulty somewhere? log.Warnw("generate window post skipped sectors", "sectors", ps, "error", err, "try", retries) + // Explicitly make sure we haven't aborted this PoSt + // (GenerateWindowPoSt may or may not check this). + // Otherwise, we could try to continue proving a + // deadline after the deadline has ended. 
+ if ctx.Err() != nil { + log.Warnw("aborting PoSt due to context cancellation", "error", ctx.Err(), "deadline", di.Index) + return nil, ctx.Err() + } + skipCount += uint64(len(ps)) for _, sector := range ps { postSkipped.Set(uint64(sector.Number)) @@ -612,12 +632,6 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty continue } - if len(postOut) == 0 { - return nil, xerrors.Errorf("received no proofs back from generate window post") - } - - params.Proofs = postOut - posts = append(posts, params) } @@ -625,12 +639,6 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty } func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]api.Partition, error) { - // Get the number of sectors allowed in a partition, for this proof size - sectorsPerPartition, err := builtin0.PoStProofWindowPoStPartitionSectors(s.proofType) - if err != nil { - return nil, xerrors.Errorf("getting sectors per partition: %w", err) - } - // We don't want to exceed the number of sectors allowed in a message. // So given the number of sectors in a partition, work out the number of // partitions that can be in a message without exceeding sectors per @@ -641,9 +649,10 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]a // sectors per partition 3: ooo // partitions per message 2: oooOOO // <1><2> (3rd doesn't fit) - // TODO(NETUPGRADE): we're going to need some form of policy abstraction - // where we can get policy from the future. Unfortunately, we can't just get this from the state. - partitionsPerMsg := int(miner0.AddressedSectorsMax / sectorsPerPartition) + partitionsPerMsg, err := policy.GetMaxPoStPartitions(s.proofType) + if err != nil { + return nil, xerrors.Errorf("getting sectors per partition: %w", err) + } // The number of messages will be: // ceiling(number of partitions / partitions per message) @@ -665,7 +674,7 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]a return batches, nil } -func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof.SectorInfo, error) { +func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof2.SectorInfo, error) { sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key()) if err != nil { return nil, err @@ -675,22 +684,22 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, return nil, nil } - substitute := proof.SectorInfo{ + substitute := proof2.SectorInfo{ SectorNumber: sset[0].SectorNumber, SealedCID: sset[0].SealedCID, SealProof: sset[0].SealProof, } - sectorByID := make(map[uint64]proof.SectorInfo, len(sset)) + sectorByID := make(map[uint64]proof2.SectorInfo, len(sset)) for _, sector := range sset { - sectorByID[uint64(sector.SectorNumber)] = proof.SectorInfo{ + sectorByID[uint64(sector.SectorNumber)] = proof2.SectorInfo{ SectorNumber: sector.SectorNumber, SealedCID: sector.SealedCID, SealProof: sector.SealProof, } } - proofSectors := make([]proof.SectorInfo, 0, len(sset)) + proofSectors := make([]proof2.SectorInfo, 0, len(sset)) if err := allSectors.ForEach(func(sectorNo uint64) error { if info, found := sectorByID[sectorNo]; found { proofSectors = append(proofSectors, info) @@ -718,13 +727,14 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi msg := &types.Message{ To: s.actor, - From: s.worker, - 
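Editor's note: `batchPartitions` now gets its per-message partition limit from `policy.GetMaxPoStPartitions`, but the surrounding arithmetic is unchanged. A sketch of that arithmetic with the example from the hunk's comment (the helper and numbers are illustrative):

```go
package sketch

// batchCounts reproduces the batching arithmetic that now goes through
// policy.GetMaxPoStPartitions: how many partitions fit into one
// SubmitWindowedPoSt message given the per-message sector cap, and how many
// messages a set of partitions needs.
func batchCounts(partitions, sectorsPerPartition, addressedSectorsMax int) (perMsg, msgs int) {
	perMsg = addressedSectorsMax / sectorsPerPartition
	msgs = (partitions + perMsg - 1) / perMsg // ceiling division
	return perMsg, msgs
}

// With 3 sectors per partition and a cap of 7 addressed sectors, perMsg is 2,
// matching the "oooOOO / <1><2> (3rd doesn't fit)" comment in the hunk.
```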
Method: builtin0.MethodsMiner.SubmitWindowedPoSt, + Method: miner.Methods.SubmitWindowedPoSt, Params: enc, Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - s.setSender(ctx, msg, spec) + if err := s.setSender(ctx, msg, spec); err != nil { + return nil, err + } // TODO: consider maybe caring about the output sm, err := s.api.MpoolPushMessage(ctx, msg, spec) @@ -752,21 +762,18 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi return sm, nil } -func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) { +func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error { mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK) if err != nil { - log.Errorw("error getting miner info", "error", err) - - // better than just failing - msg.From = s.worker - return + return xerrors.Errorf("error getting miner info: %w", err) } + // use the worker as a fallback + msg.From = mi.Worker gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK) if err != nil { log.Errorw("estimating gas", "error", err) - msg.From = s.worker - return + return nil } *msg = *gm @@ -775,9 +782,9 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds) if err != nil { log.Errorw("error selecting address for window post", "error", err) - msg.From = s.worker - return + return nil } msg.From = pa + return nil } diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index dd7ac4c24..a76483a5f 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -17,12 +17,13 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - tutils "github.com/filecoin-project/specs-actors/support/testing" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" @@ -31,6 +32,7 @@ import ( type mockStorageMinerAPI struct { partitions []api.Partition pushedMessages chan *types.Message + storageMinerApi } func newMockStorageMinerAPI() *mockStorageMinerAPI { @@ -46,10 +48,6 @@ func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Addr }, nil } -func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) { - panic("implement me") -} - func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { return abi.Randomness("ticket rand"), nil } @@ -95,15 +93,19 @@ func (m *mockStorageMinerAPI) StateWaitMsg(ctx 
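Editor's note: `setSender` now returns an error instead of silently falling back to a cached worker address. A simplified sketch of the new control flow using plain types (nothing here is the real API; it only mirrors the fatal-versus-degraded distinction in the hunk):

```go
package sketch

import (
	"context"
	"errors"
)

// chooseSender sketches the reworked setSender control flow under simplified
// types: failing to learn the miner's worker is now fatal (there is no cached
// s.worker to fall back to), while later failures such as picking a dedicated
// PoSt address only degrade to using the worker as sender.
func chooseSender(ctx context.Context, worker string, pickPoStAddr func(context.Context) (string, error)) (string, error) {
	if worker == "" {
		return "", errors.New("no worker address available")
	}
	from := worker // fallback sender
	if pa, err := pickPoStAddr(ctx); err == nil {
		from = pa
	}
	return from, nil
}
```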
diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go
index dd7ac4c24..a76483a5f 100644
--- a/storage/wdpost_run_test.go
+++ b/storage/wdpost_run_test.go
@@ -17,12 +17,13 @@ import (
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/go-state-types/dline"
 	"github.com/filecoin-project/go-state-types/network"
-	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
-	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-	proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
-	tutils "github.com/filecoin-project/specs-actors/support/testing"
+	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/journal"
@@ -31,6 +32,7 @@ import (
 type mockStorageMinerAPI struct {
 	partitions     []api.Partition
 	pushedMessages chan *types.Message
+	storageMinerApi
 }
 
 func newMockStorageMinerAPI() *mockStorageMinerAPI {
@@ -46,10 +48,6 @@ func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Addr
 	}, nil
 }
 
-func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) {
-	panic("implement me")
-}
-
 func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
 	return abi.Randomness("ticket rand"), nil
 }
@@ -95,15 +93,19 @@ func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, con
 	}, nil
 }
 
+func (m *mockStorageMinerAPI) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) {
+	return build.NewestNetworkVersion, nil
+}
+
 type mockProver struct {
 }
 
-func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof0.SectorInfo, abi.PoStRandomness) ([]proof0.PoStProof, error) {
+func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
 	panic("implement me")
 }
 
-func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof0.SectorInfo, pr abi.PoStRandomness) ([]proof0.PoStProof, []abi.SectorID, error) {
-	return []proof0.PoStProof{
+func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof2.SectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) {
+	return []proof2.PoStProof{
 		{
 			PoStProof:  abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
 			ProofBytes: []byte("post-proof"),
@@ -114,7 +116,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si
 type mockFaultTracker struct {
 }
 
-func (m mockFaultTracker) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
+func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
 	// Returns "bad" sectors so just return nil meaning all sectors are good
 	return nil, nil
 }
@@ -127,18 +129,17 @@ func TestWDPostDoPost(t *testing.T) {
 	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
 	postAct := tutils.NewIDAddr(t, 100)
-	workerAct := tutils.NewIDAddr(t, 101)
 
 	mockStgMinerAPI := newMockStorageMinerAPI()
 
 	// Get the number of sectors allowed in a partition for this proof type
-	sectorsPerPartition, err := builtin0.PoStProofWindowPoStPartitionSectors(proofType)
+	sectorsPerPartition, err := builtin2.PoStProofWindowPoStPartitionSectors(proofType)
 	require.NoError(t, err)
 	// Work out the number of partitions that can be included in a message
 	// without exceeding the message sector limit
 
 	require.NoError(t, err)
-	partitionsPerMsg := int(miner0.AddressedSectorsMax / sectorsPerPartition)
+	partitionsPerMsg := int(miner2.AddressedSectorsMax / sectorsPerPartition)
 
 	// Enough partitions to fill expectedMsgCount-1 messages
 	partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
@@ -168,16 +169,15 @@ func TestWDPostDoPost(t *testing.T) {
 		faultTracker: &mockFaultTracker{},
 		proofType:    proofType,
 		actor:        postAct,
-		worker:       workerAct,
 		journal:      journal.NilJournal(),
 	}
 
 	di := &dline.Info{
-		WPoStPeriodDeadlines:   miner0.WPoStPeriodDeadlines,
-		WPoStProvingPeriod:     miner0.WPoStProvingPeriod,
-		WPoStChallengeWindow:   miner0.WPoStChallengeWindow,
-		WPoStChallengeLookback: miner0.WPoStChallengeLookback,
-		FaultDeclarationCutoff: miner0.FaultDeclarationCutoff,
+		WPoStPeriodDeadlines:   miner2.WPoStPeriodDeadlines,
+		WPoStProvingPeriod:     miner2.WPoStProvingPeriod,
+		WPoStChallengeWindow:   miner2.WPoStChallengeWindow,
+		WPoStChallengeLookback: miner2.WPoStChallengeLookback,
+		FaultDeclarationCutoff: miner2.FaultDeclarationCutoff,
 	}
 	ts := mockTipSet(t)
@@ -188,7 +188,7 @@ func TestWDPostDoPost(t *testing.T) {
 	// Read the window PoST messages
 	for i := 0; i < expectedMsgCount; i++ {
 		msg := <-mockStgMinerAPI.pushedMessages
-		require.Equal(t, builtin0.MethodsMiner.SubmitWindowedPoSt, msg.Method)
+		require.Equal(t, miner.Methods.SubmitWindowedPoSt, msg.Method)
 		var params miner.SubmitWindowedPoStParams
 		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
 		require.NoError(t, err)
@@ -254,11 +254,11 @@ func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, add
 		Close:       0,
 		Challenge:   0,
 		FaultCutoff: 0,
-		WPoStPeriodDeadlines:   miner0.WPoStPeriodDeadlines,
-		WPoStProvingPeriod:     miner0.WPoStProvingPeriod,
-		WPoStChallengeWindow:   miner0.WPoStChallengeWindow,
-		WPoStChallengeLookback: miner0.WPoStChallengeLookback,
-		FaultDeclarationCutoff: miner0.FaultDeclarationCutoff,
+		WPoStPeriodDeadlines:   miner2.WPoStPeriodDeadlines,
+		WPoStProvingPeriod:     miner2.WPoStProvingPeriod,
+		WPoStChallengeWindow:   miner2.WPoStChallengeWindow,
+		WPoStChallengeLookback: miner2.WPoStChallengeLookback,
+		FaultDeclarationCutoff: miner2.FaultDeclarationCutoff,
 	}, nil
 }
@@ -276,7 +276,7 @@ func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, cid cid.Cid) (
 
 func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
 	return &types.Actor{
-		Code: builtin0.StorageMinerActorCodeID,
+		Code: builtin2.StorageMinerActorCodeID,
 	}, nil
 }
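One detail worth calling out in the test diff above: the mock now embeds the storageMinerApi interface, so the struct satisfies the full interface while only overriding the methods the test actually exercises; calling anything else panics at run time on the nil embedded value instead of failing compilation. A generic sketch of that embedding trick (Fetcher and its method names are made up for illustration):

```go
package main

import "fmt"

// Fetcher is a stand-in for a wide interface such as storageMinerApi.
type Fetcher interface {
	Get(key string) (string, error)
	Put(key, value string) error
}

// mockFetcher embeds the interface it mocks, the same trick the test uses by
// embedding storageMinerApi: the struct satisfies Fetcher at compile time,
// and only the methods the test calls need explicit overrides. Calling an
// un-overridden method panics on the nil embedded value, which makes missing
// coverage obvious.
type mockFetcher struct {
	Fetcher
}

func (m *mockFetcher) Get(key string) (string, error) {
	return "stub-" + key, nil
}

func main() {
	var f Fetcher = &mockFetcher{}
	v, _ := f.Get("sector")
	fmt.Println(v) // "stub-sector"; f.Put(...) would panic: not overridden
}
```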
diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go
index 99bea2e66..1a1422e19 100644
--- a/storage/wdpost_sched.go
+++ b/storage/wdpost_sched.go
@@ -31,8 +31,7 @@ type WindowPoStScheduler struct {
 	partitionSectors uint64
 	ch               *changeHandler
 
-	actor  address.Address
-	worker address.Address
+	actor address.Address
 
 	evtTypes [4]journal.EventType
 	journal  journal.Journal
@@ -41,7 +40,7 @@ type WindowPoStScheduler struct {
 	// failLk sync.Mutex
 }
 
-func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address, worker address.Address) (*WindowPoStScheduler, error) {
+func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address) (*WindowPoStScheduler, error) {
 	mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK)
 	if err != nil {
 		return nil, xerrors.Errorf("getting sector size: %w", err)
@@ -60,8 +59,7 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb
 		proofType:        rt,
 		partitionSectors: mi.WindowPoStPartitionSectors,
 
-		actor:  actor,
-		worker: worker,
+		actor: actor,
 		evtTypes: [...]journal.EventType{
 			evtTypeWdPoStScheduler:  j.RegisterEventType("wdpost", "scheduler"),
 			evtTypeWdPoStProofs:     j.RegisterEventType("wdpost", "proofs_processed"),
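The tools/stats/metrics.go change that follows swaps a per-miner StateMinerPower RPC loop for a single load of the power actor state followed by one pass over its claims, skipping zero-power miners. A simplified sketch of that iterate-over-claims shape, with stand-in types in place of the real power.State and power.Claim:

```go
package main

import "fmt"

// Claim is a stand-in for power.Claim; miner addresses are plain strings here.
type Claim struct {
	QualityAdjPower int64
}

// forEachClaim imitates the shape of powerActorState.ForEachClaim in the hunk
// below: one pass over state that has already been loaded, instead of one
// power-query RPC per miner address.
func forEachClaim(claims map[string]Claim, cb func(miner string, c Claim) error) error {
	for miner, c := range claims {
		if err := cb(miner, c); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	claims := map[string]Claim{
		"t01000": {QualityAdjPower: 32},
		"t01001": {QualityAdjPower: 0}, // zero-power miners are skipped, as in the hunk
	}

	_ = forEachClaim(claims, func(miner string, c Claim) error {
		if c.QualityAdjPower == 0 {
			return nil
		}
		fmt.Printf("chain.miner_power miner=%s power=%d\n", miner, c.QualityAdjPower)
		return nil
	})
}
```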
diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go
index aee61b2aa..795203c40 100644
--- a/tools/stats/metrics.go
+++ b/tools/stats/metrics.go
@@ -13,6 +13,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -269,25 +270,27 @@ func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointLis
 	p = NewPoint("chain.power", totalPower.TotalPower.QualityAdjPower.Int64())
 	pl.AddPoint(p)
 
-	miners, err := api.StateListMiners(ctx, tipset.Key())
+	powerActor, err := api.StateGetActor(ctx, power.Address, tipset.Key())
 	if err != nil {
 		return err
 	}
 
-	for _, addr := range miners {
-		mp, err := api.StateMinerPower(ctx, addr, tipset.Key())
-		if err != nil {
-			return err
-		}
-
-		if !mp.MinerPower.QualityAdjPower.IsZero() {
-			p = NewPoint("chain.miner_power", mp.MinerPower.QualityAdjPower.Int64())
-			p.AddTag("miner", addr.String())
-			pl.AddPoint(p)
-		}
+	powerActorState, err := power.Load(&ApiIpldStore{ctx, api}, powerActor)
+	if err != nil {
+		return err
 	}
 
-	return nil
+	return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error {
+		if claim.QualityAdjPower.Int64() == 0 {
+			return nil
+		}
+
+		p = NewPoint("chain.miner_power", claim.QualityAdjPower.Int64())
+		p.AddTag("miner", addr.String())
+		pl.AddPoint(p)
+
+		return nil
+	})
 }
 
 type msgTag struct {