Merge branch 'master' into refactor/lib/blockstore

This commit is contained in:
Raúl Kripalani 2021-02-28 19:55:23 +00:00
commit 7f0f7d0b36
149 changed files with 4352 additions and 1640 deletions

View File

@ -287,6 +287,9 @@ jobs:
- install-deps
- prepare
- run: cd extern/filecoin-ffi && make
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
- run:
name: "build lotus-soup testplan"
command: pushd testplans/lotus-soup && go build -tags=testground .
@ -306,8 +309,11 @@ jobs:
name: "prepare .env.toml"
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
- run:
name: "prepare testground home dir"
command: mkdir -p $HOME/testground/plans && mv testplans/lotus-soup testplans/graphsync $HOME/testground/plans/
name: "prepare testground home dir and link test plans"
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
- run:
name: "trigger deals baseline testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
@ -527,6 +533,14 @@ jobs:
description: A comma-separated string containing docker image tags to build and push (default = latest)
steps:
- run:
name: Confirm that environment variables are set
command: |
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
circleci-agent step halt
fi
- aws-cli/setup:
profile-name: <<parameters.profile-name>>
aws-access-key-id: <<parameters.aws-access-key-id>>

View File

@ -1,5 +1,125 @@
# Lotus changelog
# 1.5.0 / 2021-02-23
This is a mandatory release of Lotus that introduces the fifth upgrade to the Filecoin network. The network upgrade occurs at height 550321, before which time all nodes must have updated to this release (or later). At this height, [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0) will take effect, which in turn implements the following two FIPs:
- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT.
- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers.
Note that the integration of v3 actors was already completed in 1.4.2; this release simply sets the epoch at which the upgrade occurs.
## Disputer
FIP-0010 introduces the ability to dispute bad Window PoSts. Node operators are encouraged to run the new Lotus disputer alongside their Lotus daemons. For more information, see the announcement [here](https://github.com/filecoin-project/lotus/discussions/5617#discussioncomment-387333).
## Changes
- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Add a `LOTUS_DISABLE_V3_ACTOR_MIGRATION` envvar
- Setting this envvar to 1 disables the v3 actor migration; it should only be used in the event of a failed migration
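For reference, a condensed sketch of the gating logic as it lands in `build/params_mainnet.go` later in this commit (only the envvar-related lines are shown):

```go
package build // condensed sketch of the change to build/params_mainnet.go

import (
	"math"
	"os"

	"github.com/filecoin-project/go-state-types/abi"
)

// 2021-03-04T00:00:30Z
var UpgradeActorsV3Height = abi.ChainEpoch(550321)

func init() {
	// Escape hatch: pushing the upgrade epoch to MaxInt64 effectively
	// disables the v3 actor migration. Only use this if the migration fails.
	if os.Getenv("LOTUS_DISABLE_V3_ACTOR_MIGRATION") == "1" {
		UpgradeActorsV3Height = math.MaxInt64
	}
}
```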
# 1.4.2 / 2021-02-17
This is a large, and highly recommended, optional release with new features and improvements for lotus miner and deal-making UX. The release also integrates [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0), which implements two FIPs:
- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT.
- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers.
Note that this release does NOT set an upgrade epoch for v3 actors to take effect. That will be done in the upcoming 1.5.0 release.
## New Features
- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Added sector termination API and CLI
- Run `lotus-miner sectors terminate`
- [#5342](https://github.com/filecoin-project/lotus/pull/5342) Added CLI for using a multisig wallet as a miner's owner address
- See how to set it up [here](https://github.com/filecoin-project/lotus/pull/5342#issue-554009129)
- [#5363](https://github.com/filecoin-project/lotus/pull/5363), [#5418](https://github.com/filecoin-project/lotus/pull/5418), [#5476](https://github.com/filecoin-project/lotus/pull/5476), [#5459](https://github.com/filecoin-project/lotus/pull/5459) Integrated [specs-actors v3](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0)
- [#5472](https://github.com/filecoin-project/lotus/pull/5472) Generate actor v3 methods for pond
- [#5379](https://github.com/filecoin-project/lotus/pull/5379) Added WindowPoSt disputer
- This is to support [FIP-0010 off-chain Window PoSt verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md)
- See how to run a disputer [here](https://github.com/filecoin-project/lotus/pull/5379#issuecomment-776482445)
- [#5309](https://github.com/filecoin-project/lotus/pull/5309) Batch multiple deals into one `PublishStorageDeals` message
- [#5411](https://github.com/filecoin-project/lotus/pull/5411) Handle batch `PublishStorageDeals` message in sealing recovery
- [#5505](https://github.com/filecoin-project/lotus/pull/5505) Exclude expired deals from batching in `PublishStorageDeals` messages
- Added `PublishMsgPeriod` and `MaxDealsPerPublishMsg` to miner `Dealmaking` [configuration](https://docs.filecoin.io/mine/lotus/miner-configuration/#dealmaking-section). See how they work [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#publishing-several-deals-in-one-message). A programmatic sketch using the underlying `PublishMsgConfig` appears after this list.
- [#5538](https://github.com/filecoin-project/lotus/pull/5538), [#5549](https://github.com/filecoin-project/lotus/pull/5549) Added a command to list pending deals and force publish messages.
- Run `lotus-miner market pending-publish`
- [#5428](https://github.com/filecoin-project/lotus/pull/5428) Moved waiting for `PublishStorageDeals` messages' receipt from markets to lotus
- [#5510](https://github.com/filecoin-project/lotus/pull/5510) Added `nerpanet` build option
- To build `nerpanet`, run `make nerpanet`
- [#5433](https://github.com/filecoin-project/lotus/pull/5433) Added `AlwaysKeepUnsealedCopy` option to the miner configuration
- [#5520](https://github.com/filecoin-project/lotus/pull/5520) Added `MsigGetPending` to get pending transactions for multisig wallets
- [#5219](https://github.com/filecoin-project/lotus/pull/5219) Added interactive mode for lotus-wallet
- [#5529](https://github.com/filecoin-project/lotus/pull/5529) Added support for miner nodes in the `lotus-shed rpc` util
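The deal-batching behaviour referenced above can also be configured programmatically. A minimal sketch, assuming the `PublishMsgConfig` override that this commit adds to the test kit (the ten-second period and two-deal cap are illustrative values, and `dealPublisherOverride` is a hypothetical helper name):

```go
package example // illustrative only

import (
	"time"

	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node"
)

// dealPublisherOverride returns a node option that batches up to two deals per
// PublishStorageDeals message, flushing a partial batch after ten seconds.
// The nil first argument mirrors the testkit usage in this commit.
func dealPublisherOverride() node.Option {
	return node.Override(
		new(*storageadapter.DealPublisher),
		storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
			Period:         10 * time.Second,
			MaxDealsPerMsg: 2,
		}),
	)
}
```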
## Bug Fixes
- [#5210](https://github.com/filecoin-project/lotus/pull/5210) Miner should not dial client on restart
- [#5403](https://github.com/filecoin-project/lotus/pull/5403) When estimating GasLimit only apply prior messages up to the nonce
- [#5410](https://github.com/filecoin-project/lotus/pull/5410) Fix the calibnet build option
- [#5492](https://github.com/filecoin-project/lotus/pull/5492) Fixed `has` for ipfsbstore for non-existing blocks
- [#5361](https://github.com/filecoin-project/lotus/pull/5361) Fixed retrieval hangs when using `IpfsOnlineMode=true`
- [#5493](https://github.com/filecoin-project/lotus/pull/5493) Fixed retrieval failure when price-per-byte is zero
- [#5506](https://github.com/filecoin-project/lotus/pull/5506) Fixed contexts in the storage adapter
- [#5515](https://github.com/filecoin-project/lotus/pull/5515) Properly wire up `StateReadState` on gateway API
- [#5582](https://github.com/filecoin-project/lotus/pull/5582) Fixed error logging format strings
- [#5614](https://github.com/filecoin-project/lotus/pull/5614) Fixed websocket reconnection handling
## Improvements
- [#5389](https://github.com/filecoin-project/lotus/pull/5389) Show verified indicator for `./lotus-miner storage-deals list`
- [#5229](https://github.com/filecoin-project/lotus/pull/5220) Show power for verified deals in `./lotus-miner sectors list`
- [#5407](https://github.com/filecoin-project/lotus/pull/5407) Added explicit check of the miner address protocol
- [#5399](https://github.com/filecoin-project/lotus/pull/5399) watchdog: increase heapprof capture threshold to 90%
- [#5398](https://github.com/filecoin-project/lotus/pull/5398) storageadapter: Look at precommits on-chain since deal publish msg
- [#5470](https://github.com/filecoin-project/lotus/pull/5470) Added `--no-timing` option for `./lotus state compute-state --html`
- [#5417](https://github.com/filecoin-project/lotus/pull/5417) Storage Manager: Always unseal full sectors
- [#5393](https://github.com/filecoin-project/lotus/pull/5393) Switched to [filecoin-ffi bls api](https://github.com/filecoin-project/filecoin-ffi/pull/159) for bls signatures
- [#5380](https://github.com/filecoin-project/lotus/pull/5210) Refactor deals API tests
- [#5397](https://github.com/filecoin-project/lotus/pull/5397) Fixed a flake in the sync manager edge case test
- [#5406](https://github.com/filecoin-project/lotus/pull/5406) Added a test to ensure a correct window post cannot be disputed
- [#5294](https://github.com/filecoin-project/lotus/pull/5394) Added jobs to build Lotus docker image and push it to AWS ECR
- [#5387](https://github.com/filecoin-project/lotus/pull/5387) Added network info (mainnet|calibnet) to the version output
- [#5497](https://github.com/filecoin-project/lotus/pull/5497) Export metrics for lotus-gateway
- [#4950](https://github.com/filecoin-project/lotus/pull/4950) Removed bench policy
- [#5047](https://github.com/filecoin-project/lotus/pull/5047) Improved the UX for `./lotus-shed bitfield enc`
- [#5282](https://github.com/filecoin-project/lotus/pull/5282) Snake a context through the chain blockstore creation
- [#5350](https://github.com/filecoin-project/lotus/pull/5350) Avoid using `mp.cfg` directly to prevent a race condition
- [#5449](https://github.com/filecoin-project/lotus/pull/5449) Documented the block header fields better
- [#5404](https://github.com/filecoin-project/lotus/pull/5404) Added retrying of proofs if an incorrect one is generated
- [#4545](https://github.com/filecoin-project/lotus/pull/4545) Made state tipset usage consistent in the API
- [#5540](https://github.com/filecoin-project/lotus/pull/5540) Removed unnecessary database reads in validation check
- [#5554](https://github.com/filecoin-project/lotus/pull/5554) Fixed `build lotus-soup` CI job
- [#5552](https://github.com/filecoin-project/lotus/pull/5552) Updated CircleCI to halt gracefully
- [#5555](https://github.com/filecoin-project/lotus/pull/5555) Cleaned up the node builder and added docstrings
- [#5564](https://github.com/filecoin-project/lotus/pull/5564) Stopped depending on gocheck with gomod
- [#5574](https://github.com/filecoin-project/lotus/pull/5574) Updated CLI UI
- [#5570](https://github.com/filecoin-project/lotus/pull/5570) Added code CID to `StateReadState` return object
- [#5565](https://github.com/filecoin-project/lotus/pull/5565) Added storageadapter.PublishMsgConfig to miner in testkit for lotus-soup testplan
- [#5571](https://github.com/filecoin-project/lotus/pull/5571) Added `lotus-seed genesis car` to generate a genesis car for devnets
- [#5613](https://github.com/filecoin-project/lotus/pull/5613) Check format in client commP util
- [#5507](https://github.com/filecoin-project/lotus/pull/5507) Refactored coalescing logic into its own function and took both cancellation sets into account
- [#5592](https://github.com/filecoin-project/lotus/pull/5592) Verify FFI version before building
## Dependency Updates
- [#5296](https://github.com/filecoin-project/lotus/pull/5396) Upgraded to [raulk/go-watchdog@v1.0.1](https://github.com/raulk/go-watchdog/releases/tag/v1.0.1)
- [#5450](https://github.com/filecoin-project/lotus/pull/5450) Dependency updates
- [#5425](https://github.com/filecoin-project/lotus/pull/5425) Fixed stale imports in testplans/lotus-soup
- [#5535](https://github.com/filecoin-project/lotus/pull/5535) Updated to [go-fil-markets@v1.1.7](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.1.7)
- [#5616](https://github.com/filecoin-project/lotus/pull/5600) Updated to [filecoin-ffi@b6e0b35fb49ed0fe](https://github.com/filecoin-project/filecoin-ffi/releases/tag/b6e0b35fb49ed0fe)
- [#5599](https://github.com/filecoin-project/lotus/pull/5599) Updated to [go-bitfield@v0.2.4](https://github.com/filecoin-project/go-bitfield/releases/tag/v0.2.4)
- [#5614](https://github.com/filecoin-project/lotus/pull/5614), [#5621](https://github.com/filecoin-project/lotus/pull/5621) Updated to [go-jsonrpc@v0.1.3](https://github.com/filecoin-project/go-jsonrpc/releases/tag/v0.1.3)
- [#5459](https://github.com/filecoin-project/lotus/pull/5459) Updated to [spec-actors@v3.0.1](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.1)
## Network Version v10 Upgrade
- [#5473](https://github.com/filecoin-project/lotus/pull/5473) Merged staging branch for v1.5.0
- [#5603](https://github.com/filecoin-project/lotus/pull/5603) Set nerpanet's upgrade epochs up to v3 actors
- [#5471](https://github.com/filecoin-project/lotus/pull/5471), [#5456](https://github.com/filecoin-project/lotus/pull/5456) Set calibration net actor v3 migration epochs for testing
- [#5434](https://github.com/filecoin-project/lotus/pull/5434) Implemented pre-migration framework
- [#5476](https://github.com/filecoin-project/lotus/pull/5477) Tune migration
# 1.4.1 / 2021-01-20
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors.

View File

@ -41,8 +41,14 @@ MODULES+=$(FFI_PATH)
BUILD_DEPS+=build/.filecoin-install
CLEAN+=build/.filecoin-install
$(MODULES): build/.update-modules ;
ffi-version-check:
@[[ "$$(awk '/const Version/{print $$5}' extern/filecoin-ffi/version.go)" -eq 2 ]] || (echo "FFI version mismatch, update submodules"; exit 1)
BUILD_DEPS+=ffi-version-check
.PHONY: ffi-version-check
$(MODULES): build/.update-modules ;
# dummy file that marks the last time modules were updated
build/.update-modules:
git submodule update --init --recursive
@ -66,6 +72,12 @@ debug: lotus lotus-miner lotus-worker lotus-seed
calibnet: GOFLAGS+=-tags=calibnet
calibnet: lotus lotus-miner lotus-worker lotus-seed
nerpanet: GOFLAGS+=-tags=nerpanet
nerpanet: lotus lotus-miner lotus-worker lotus-seed
butterflynet: GOFLAGS+=-tags=butterflynet
butterflynet: lotus lotus-miner lotus-worker lotus-seed
lotus: $(BUILD_DEPS)
rm -f lotus
go build $(GOFLAGS) -o lotus ./cmd/lotus

View File

@ -5,7 +5,6 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-jsonrpc/auth"
metrics "github.com/libp2p/go-libp2p-core/metrics"

View File

@ -337,10 +337,14 @@ type FullNode interface {
// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
// StateCall runs the given message and returns its result without any persisted changes.
//
// StateCall applies the message to the tipset's parent state. The
// message is not applied on-top-of the messages in the passed-in
// tipset.
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
// If no tipset key is provided, the appropriate tipset is looked up.
@ -466,6 +470,12 @@ type FullNode interface {
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
//MsigGetPending returns pending transactions for the given multisig
//wallet. Once pending transactions are fully approved, they will no longer
//appear here.
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
//<initial balance>, <sender address of the create msg>, <gas price>
@ -646,6 +656,7 @@ type Message struct {
type ActorState struct {
Balance types.BigInt
Code cid.Cid
State interface{}
}
@ -988,3 +999,13 @@ type MessageMatch struct {
To address.Address
From address.Address
}
type MsigTransaction struct {
ID int64
To address.Address
Value abi.TokenAmount
Method abi.MethodNum
Params []byte
Approved []address.Address
}
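As a usage illustration (not part of this diff), a minimal sketch of querying the new endpoint from a `FullNode` client; `printPendingMsigTxns` is a hypothetical helper, and resolving against the heaviest tipset via `types.EmptyTSK` is an assumption:

```go
package example // illustrative only

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// printPendingMsigTxns lists transactions that have been proposed but not yet
// fully approved on the given multisig wallet.
func printPendingMsigTxns(ctx context.Context, fn api.FullNode, msig address.Address) error {
	pending, err := fn.MsigGetPending(ctx, msig, types.EmptyTSK)
	if err != nil {
		return err
	}
	for _, txn := range pending {
		fmt.Printf("txn %d -> %s  value=%s  method=%d  approvals=%d\n",
			txn.ID, txn.To, txn.Value, txn.Method, len(txn.Approved))
	}
	return nil
}
```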

View File

@ -27,6 +27,7 @@ type GatewayAPI interface {
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)

View File

@ -15,6 +15,7 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/chain/types"
@ -105,10 +106,12 @@ type StorageMiner interface {
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
// MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error)
MarketPublishPendingDeals(ctx context.Context) error
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
DealsList(ctx context.Context) ([]MarketDeal, error)
@ -236,3 +239,11 @@ type AddressConfig struct {
CommitControl []address.Address
TerminateControl []address.Address
}
// PendingDealInfo has info about pending deals and when they are due to be
// published
type PendingDealInfo struct {
Deals []market.ClientDealProposal
PublishPeriodStart time.Time
PublishPeriod time.Duration
}
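As a usage illustration (not part of this diff), a sketch of the flow behind `lotus-miner market pending-publish`: inspect the pending batch, then optionally force-publish it. `forcePublishPending` is a hypothetical helper name:

```go
package example // illustrative only

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// forcePublishPending reports the current publish window and, if any deals are
// queued, asks the miner to publish them immediately.
func forcePublishPending(ctx context.Context, m api.StorageMiner) error {
	info, err := m.MarketPendingDeals(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d pending deals; period %s started at %s\n",
		len(info.Deals), info.PublishPeriod, info.PublishPeriodStart)
	if len(info.Deals) == 0 {
		return nil
	}
	return m.MarketPublishPendingDeals(ctx)
}
```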

View File

@ -232,6 +232,7 @@ type FullNodeStruct struct {
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MsigGetVestingSchedule func(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MsigGetPending func(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) `perm:"read"`
MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigApprove func(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) `perm:"sign"`
@ -298,8 +299,10 @@ type StorageMinerStruct struct {
MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"`
MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"`
MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"`
MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
MarketPendingDeals func(ctx context.Context) (api.PendingDealInfo, error) `perm:"write"`
MarketPublishPendingDeals func(ctx context.Context) error `perm:"admin"`
PledgeSector func(context.Context) error `perm:"write"`
@ -434,6 +437,7 @@ type GatewayStruct struct {
MpoolPush func(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending func(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateGetActor func(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
@ -444,9 +448,10 @@ type GatewayStruct struct {
StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
StateSearchMsg func(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error)
StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error)
StateSearchMsg func(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
@ -1111,6 +1116,10 @@ func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, s
return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk)
}
func (c *FullNodeStruct) MsigGetPending(ctx context.Context, a address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) {
return c.Internal.MsigGetPending(ctx, a, tsk)
}
func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp)
}
@ -1499,6 +1508,14 @@ func (c *StorageMinerStruct) MarketCancelDataTransfer(ctx context.Context, trans
return c.Internal.MarketCancelDataTransfer(ctx, transferID, otherPeer, isInitiator)
}
func (c *StorageMinerStruct) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) {
return c.Internal.MarketPendingDeals(ctx)
}
func (c *StorageMinerStruct) MarketPublishPendingDeals(ctx context.Context) error {
return c.Internal.MarketPublishPendingDeals(ctx)
}
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
return c.Internal.DealsImportData(ctx, dealPropCid, file)
}
@ -1737,6 +1754,10 @@ func (g GatewayStruct) MsigGetVested(ctx context.Context, addr address.Address,
return g.Internal.MsigGetVested(ctx, addr, start, end)
}
func (g GatewayStruct) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) {
return g.Internal.MsigGetPending(ctx, addr, tsk)
}
func (g GatewayStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
return g.Internal.StateAccountKey(ctx, addr, tsk)
}
@ -1801,6 +1822,10 @@ func (g GatewayStruct) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence
return g.Internal.StateWaitMsg(ctx, msg, confidence)
}
func (g GatewayStruct) StateReadState(ctx context.Context, addr address.Address, ts types.TipSetKey) (*api.ActorState, error) {
return g.Internal.StateReadState(ctx, addr, ts)
}
func (c *WalletStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
return c.Internal.WalletNew(ctx, typ)
}

View File

@ -5,14 +5,18 @@ package api
import (
"fmt"
"io"
"sort"
abi "github.com/filecoin-project/go-state-types/abi"
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
@ -171,7 +175,8 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -319,7 +324,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -427,7 +433,8 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -575,7 +582,8 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -723,7 +731,8 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}

View File

@ -20,9 +20,13 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
@ -88,6 +92,97 @@ func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api
return res, data, nil
}
func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: PresealGenesis,
}}
// Create a connect client and miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed)
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(s.ctx)
require.NoError(t, err)
startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := s.client.ChainGetMessage(s.ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
@ -159,6 +254,21 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
}
}
func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set price-per-byte to zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
}
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := miner.ActorAddress(ctx)
if err != nil {

View File

@ -18,7 +18,9 @@ import (
)
func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
// TODO: Make the mock sector size configurable and reenable this
//t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) })
}
func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) {

View File

@ -59,6 +59,7 @@ const GenesisPreseals = 2
// Options for setting up a mock storage miner
type StorageMiner struct {
Full int
Opts node.Option
Preseal int
}

View File

@ -19,8 +19,8 @@ import (
)
type IPFSBlockstore struct {
ctx context.Context
api iface.CoreAPI
ctx context.Context
api, offlineAPI iface.CoreAPI
}
var _ BasicBlockstore = (*IPFSBlockstore)(nil)
@ -34,11 +34,22 @@ func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, e
if err != nil {
return nil, xerrors.Errorf("setting offline mode: %s", err)
}
b := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI := api
if onlineMode {
offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
return Adapt(b), nil
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
@ -50,11 +61,22 @@ func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onl
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
b := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI := api
if onlineMode {
offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
return Adapt(b), nil
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error {
@ -62,7 +84,7 @@ func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error {
}
func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) {
_, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
_, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid))
if err != nil {
// The underlying client is running in Offline mode.
// Stat() will fail with an err if the block isn't in the

View File

@ -0,0 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBQb5Eg2DRqL1Xrzm8S7AFnG2gkj1iKQKBBMEXJ7mXuWQ
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWJ2qKfGJg2i4Pn7nVCQ13oktS4eZfXFJk9ZjQy9uxLdq9

View File

@ -1,4 +1,4 @@
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWK1QYsm6iqyhgH7vqsbeoNoKHbT368h1JLHS1qYN36oyc
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWKDyJZoPsNak1iYNN1GGmvGnvhyVbWBL6iusYfP3RpgYs
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWJRSTnzABB6MYYEBbSTT52phQntVD1PpRTMh1xt9mh6yH
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWQLi3kY6HnMYLUtwCe26zWMdNhniFgHVNn1DioQc7NiWv
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWRLZAseMo9h7fRD6ojn6YYDXHsBSavX5YmjBZ9ngtAEec
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWJFtDXgZEQMEkjJPSrbfdvh2xfjVKrXeNFG1t8ioJXAzv
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWP1uB9Lo7yCA3S17TD4Y5wStP5Nk7Vqh53m8GsFjkyujD
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWLrPM4WPK1YRGPCUwndWcDX8GCYgms3DiuofUmxwvhMCn

View File

@ -0,0 +1,4 @@
/dns4/bootstrap-0.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWNfuGjtzWTVz8eJGZ2C3aJg2xLqorhsagu4LTWw6CwpK9
/dns4/bootstrap-1.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWDfsxYk7dC6NNsHqZqqyMJCzkjZuXhjsmqBk3TUCBZLga
/dns4/bootstrap-2.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWRZAGHmCCaa2gkYmnC4Q2TEwHGFSh6Fh1FFJ7RSXak5yN
/dns4/bootstrap-3.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWBFxEigSKLvxJVdw3JziC9ePHHnyAn5LifWSqg2kttcth

Binary file not shown.

Binary file not shown.

build/genesis/nerpanet.car Normal file

Binary file not shown.

build/params_butterfly.go Normal file
View File

@ -0,0 +1,52 @@
// +build butterflynet
package build
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
const BootstrappersFile = "butterflynet.pi"
const GenesisFile = "butterflynet.car"
const UpgradeBreezeHeight = -1
const BreezeGasTampingDuration = 120
const UpgradeSmokeHeight = -2
const UpgradeIgnitionHeight = -3
const UpgradeRefuelHeight = -4
var UpgradeActorsV2Height = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60
const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradeCalicoHeight = 120
const UpgradePersianHeight = 150
const UpgradeClausHeight = 180
const UpgradeOrangeHeight = 210
const UpgradeActorsV3Height = 240
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
)
SetAddressNetwork(address.Testnet)
Devnet = true
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2

View File

@ -28,29 +28,22 @@ var UpgradeActorsV2Height = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
// Miners, clients, developers, custodians all need time to prepare.
// We still have upgrades and state changes to do, but can happen after signaling timing here.
const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradeCalicoHeight = 92000
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeCalicoHeight = 100
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)
// 2020-12-17T19:00:00Z
const UpgradeClausHeight = 161386
const UpgradeClausHeight = 250
// 2021-01-17T19:00:00Z
const UpgradeOrangeHeight = 250666
const UpgradeOrangeHeight = 300
// 2021-01-28T21:00:00Z
const UpgradeActorsV3Height = 282586
const UpgradeActorsV3Height = 600
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 30))
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)

View File

@ -2,10 +2,13 @@
// +build !2k
// +build !testground
// +build !calibnet
// +build !nerpanet
// +build !butterflynet
package build
import (
"math"
"os"
"github.com/filecoin-project/go-address"
@ -50,8 +53,8 @@ const UpgradeOrangeHeight = 336458
// 2020-12-22T02:00:00Z
const UpgradeClausHeight = 343200
// TODO
const UpgradeActorsV3Height = 999999999
// 2021-03-04T00:00:30Z
var UpgradeActorsV3Height = abi.ChainEpoch(550321)
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
@ -60,6 +63,10 @@ func init() {
SetAddressNetwork(address.Mainnet)
}
if os.Getenv("LOTUS_DISABLE_V3_ACTOR_MIGRATION") == "1" {
UpgradeActorsV3Height = math.MaxInt64
}
Devnet = false
BuildType = BuildMainnet

build/params_nerpanet.go Normal file
View File

@ -0,0 +1,72 @@
// +build nerpanet
package build
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
const BootstrappersFile = "nerpanet.pi"
const GenesisFile = "nerpanet.car"
const UpgradeBreezeHeight = -1
const BreezeGasTampingDuration = 0
const UpgradeSmokeHeight = -1
const UpgradeIgnitionHeight = -2
const UpgradeRefuelHeight = -3
const UpgradeTapeHeight = -4
const UpgradeLiftoffHeight = -5
const UpgradeActorsV2Height = 120 // critical: the network can bootstrap from v1 only
const UpgradeKumquatHeight = -6
const UpgradeCalicoHeight = 306000
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 12)
const UpgradeOrangeHeight = 307500
const UpgradeClausHeight = 307600
const UpgradeActorsV3Height = 308000
func init() {
// Minimum block production power is set to 4 TiB
// Rationale is to discourage small-scale miners from trying to take over the network
// One needs to invest in ~2.3x the compute to break consensus, making it not worth it
//
// DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize
//
policy.SetConsensusMinerMinPower(abi.NewStoragePower(4 << 40))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
// Lower the most time-consuming parts of PoRep
policy.SetPreCommitChallengeDelay(10)
// TODO - make this a variable
//miner.WPoStChallengeLookback = abi.ChainEpoch(2)
Devnet = false
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4

View File

@ -3,5 +3,6 @@
package build
import (
_ "github.com/GeertJohan/go.rice/rice"
_ "github.com/whyrusleeping/bencher"
)

View File

@ -35,7 +35,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
const BuildVersion = "1.4.1"
const BuildVersion = "1.5.0"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
@ -89,7 +89,7 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion = newVer(1, 0, 0)
FullAPIVersion = newVer(1, 1, 0)
MinerAPIVersion = newVer(1, 0, 1)
WorkerAPIVersion = newVer(1, 0, 0)
)

View File

@ -51,6 +51,7 @@ type MessageBuilder interface {
// this type is the same between v0 and v2
type ProposalHashData = multisig3.ProposalHashData
type ProposeReturn = multisig3.ProposeReturn
type ProposeParams = multisig3.ProposeParams
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := multisig3.TxnIDParams{ID: multisig3.TxnID(id)}

View File

@ -99,11 +99,13 @@ func (e *Events) listenHeadChanges(ctx context.Context) {
} else {
log.Warn("listenHeadChanges quit")
}
if ctx.Err() != nil {
select {
case <-build.Clock.After(time.Second):
case <-ctx.Done():
log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
return
}
build.Clock.Sleep(time.Second)
log.Info("restarting listenHeadChanges")
}
}

View File

@ -5,6 +5,7 @@ package exchange
import (
"fmt"
"io"
"sort"
types "github.com/filecoin-project/lotus/chain/types"
cid "github.com/ipfs/go-cid"
@ -13,6 +14,8 @@ import (
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
var lengthBufRequest = []byte{131}

View File

@ -120,12 +120,12 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err)
}
ds, err := lr.Datastore("/metadata")
ds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return nil, xerrors.Errorf("failed to get metadata datastore: %w", err)
}
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain)
if err != nil {
return nil, err
}

View File

@ -5,12 +5,16 @@ package market
import (
"fmt"
"io"
"sort"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
var lengthBufFundedAddressState = []byte{131}

View File

@ -324,7 +324,7 @@ func (a *fundedAddress) saveState() {
// Not much we can do if saving to the datastore fails, just log
err := a.str.save(a.state)
if err != nil {
log.Errorf("saving state to store for addr %s: %w", a.state.Addr, err)
log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err)
}
}
@ -579,7 +579,7 @@ func (a *fundedAddress) startWaitForResults(msgCid cid.Cid) {
if err != nil {
// We don't really care about the results here, we're just waiting
// so as to only process one on-chain message at a time
log.Errorf("waiting for results of message %s for addr %s: %w", msgCid, a.state.Addr, err)
log.Errorf("waiting for results of message %s for addr %s: %v", msgCid, a.state.Addr, err)
}
a.lk.Lock()

View File

@ -48,9 +48,13 @@ func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error {
}
func (mp *MessagePool) GetConfig() *types.MpoolConfig {
mp.cfgLk.Lock()
defer mp.cfgLk.Unlock()
return mp.cfg.Clone()
return mp.getConfig().Clone()
}
func (mp *MessagePool) getConfig() *types.MpoolConfig {
mp.cfgLk.RLock()
defer mp.cfgLk.RUnlock()
return mp.cfg
}
func validateConfg(cfg *types.MpoolConfig) error {

View File

@ -133,7 +133,7 @@ type MessagePool struct {
curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
curTs *types.TipSet
cfgLk sync.Mutex
cfgLk sync.RWMutex
cfg *types.MpoolConfig
api Provider
@ -781,7 +781,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
if incr {
mp.currentSize++
if mp.currentSize > mp.cfg.SizeLimitHigh {
if mp.currentSize > mp.getConfig().SizeLimitHigh {
// send signal to prune messages if it hasn't already been sent
select {
case mp.pruneTrigger <- struct{}{}:

View File

@ -19,7 +19,8 @@ func (mp *MessagePool) pruneExcessMessages() error {
mp.lk.Lock()
defer mp.lk.Unlock()
if mp.currentSize < mp.cfg.SizeLimitHigh {
mpCfg := mp.getConfig()
if mp.currentSize < mpCfg.SizeLimitHigh {
return nil
}
@ -27,7 +28,7 @@ func (mp *MessagePool) pruneExcessMessages() error {
case <-mp.pruneCooldown:
err := mp.pruneMessages(context.TODO(), ts)
go func() {
time.Sleep(mp.cfg.PruneCooldown)
time.Sleep(mpCfg.PruneCooldown)
mp.pruneCooldown <- struct{}{}
}()
return err
@ -53,8 +54,9 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
// protected actors -- not pruned
protected := make(map[address.Address]struct{})
mpCfg := mp.getConfig()
// we never prune priority addresses
for _, actor := range mp.cfg.PriorityAddrs {
for _, actor := range mpCfg.PriorityAddrs {
protected[actor] = struct{}{}
}
@ -90,7 +92,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
})
// Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark
loWaterMark := mp.cfg.SizeLimitLow
loWaterMark := mpCfg.SizeLimitLow
keepLoop:
for _, chain := range chains {
for _, m := range chain.msgs {

View File

@ -532,14 +532,14 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
log.Infow("select priority messages done", "took", dt)
}
}()
result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow)
mpCfg := mp.getConfig()
result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow)
gasLimit := int64(build.BlockGasLimit)
minGas := int64(gasguess.MinGas)
// 1. Get priority actor chains
var chains []*msgChain
priority := mp.cfg.PriorityAddrs
priority := mpCfg.PriorityAddrs
for _, actor := range priority {
mset, ok := pending[actor]
if ok {
@ -719,7 +719,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// the balance
a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err)
log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err)
return nil
}

View File

@ -421,20 +421,9 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
if cb != nil {
// record the transfer in execution traces
fakeMsg := &types.Message{
From: from,
To: to,
Value: amt,
}
fakeRct := &types.MessageReceipt{
ExitCode: 0,
Return: nil,
GasUsed: 0,
}
cb(types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: fakeRct,
Msg: makeFakeMsg(from, to, amt, 0),
MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -698,24 +687,14 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
if cb != nil {
// record the transfer in execution traces
fakeMsg := &types.Message{
From: builtin.SystemActorAddr,
To: builtin.SystemActorAddr,
Value: big.Zero(),
Nonce: uint64(epoch),
}
fakeRct := &types.MessageReceipt{
ExitCode: 0,
Return: nil,
GasUsed: 0,
}
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *fakeRct,
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: fakeRct,
MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -914,6 +893,66 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
return newRoot, nil
}
func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, cb ExecCallback, epoch abi.ChainEpoch) error {
a, err := tree.GetActor(addr)
if xerrors.Is(err, types.ErrActorNotFound) {
return types.ErrActorNotFound
} else if err != nil {
return xerrors.Errorf("failed to get actor to delete: %w", err)
}
var trace types.ExecutionTrace
if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
trace = t
}); err != nil {
return xerrors.Errorf("transferring terminated actor's balance: %w", err)
}
if cb != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: trace,
Duration: 0,
GasCosts: nil,
}); err != nil {
return xerrors.Errorf("recording transfers: %w", err)
}
}
err = tree.DeleteActor(addr)
if err != nil {
return xerrors.Errorf("deleting actor from tree: %w", err)
}
ia, err := tree.GetActor(init_.Address)
if err != nil {
return xerrors.Errorf("loading init actor: %w", err)
}
ias, err := init_.Load(&state.AdtStore{IpldStore: tree.Store}, ia)
if err != nil {
return xerrors.Errorf("loading init actor state: %w", err)
}
if err := ias.Remove(addr); err != nil {
return xerrors.Errorf("deleting entry from address map: %w", err)
}
nih, err := tree.Store.Put(ctx, ias)
if err != nil {
return xerrors.Errorf("writing new init actor state: %w", err)
}
ia.Head = nih
return tree.SetActor(init_.Address, ia)
}
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
@ -932,16 +971,21 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
}
// perform some basic sanity checks to make sure everything still works.
store := store.ActorStore(ctx, sm.ChainStore().Blockstore())
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
} else if newRoot2 != newRoot {
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
} else if _, err := newSm.GetActor(init_.Address); err != nil {
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
tree, err := sm.StateTree(newRoot)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
if build.BuildType == build.BuildMainnet {
err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch)
if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
}
newRoot, err = tree.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("flushing state tree: %w", err)
}
}
return newRoot, nil
@ -1138,24 +1182,14 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
if cb != nil {
// record the transfer in execution traces
fakeMsg := &types.Message{
From: builtin.SystemActorAddr,
To: addr,
Value: big.Zero(),
Nonce: uint64(epoch),
}
fakeRct := &types.MessageReceipt{
ExitCode: 0,
Return: nil,
GasUsed: 0,
}
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *fakeRct,
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: fakeRct,
MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -1274,3 +1308,20 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St
return nil
}
func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
return &types.Message{
From: from,
To: to,
Value: amt,
Nonce: nonce,
}
}
func makeFakeRct() *types.MessageReceipt {
return &types.MessageReceipt{
ExitCode: 0,
Return: nil,
GasUsed: 0,
}
}

View File

@ -621,7 +621,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
go func() {
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit)
if err != nil {
log.Warnf("failed to look back through chain for message: %w", err)
log.Warnf("failed to look back through chain for message: %v", err)
return
}

View File

@ -145,20 +145,6 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return mas.GetSector(sid)
}
func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, snos *bitfield.BitField) ([]*miner.SectorOnChainInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return mas.LoadSectors(snos)
}
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {

View File

@ -157,52 +157,51 @@ func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) {
// coalesced revert set
// - pending reverts are cancelled by incoming applys
// - incoming reverts are cancelled by pending applys
newRevert := make([]*types.TipSet, 0, len(c.revert)+len(revert))
for _, ts := range c.revert {
_, cancel := applying[ts.Key()]
if cancel {
continue
}
newRevert = append(newRevert, ts)
}
for _, ts := range revert {
_, cancel := pendApply[ts.Key()]
if cancel {
continue
}
newRevert = append(newRevert, ts)
}
newRevert := c.merge(c.revert, revert, pendApply, applying)
// coalesced apply set
// - pending applys are cancelled by incoming reverts
// - incoming applys are cancelled by pending reverts
newApply := make([]*types.TipSet, 0, len(c.apply)+len(apply))
for _, ts := range c.apply {
_, cancel := reverting[ts.Key()]
if cancel {
continue
}
newApply = append(newApply, ts)
}
for _, ts := range apply {
_, cancel := pendRevert[ts.Key()]
if cancel {
continue
}
newApply = append(newApply, ts)
}
newApply := c.merge(c.apply, apply, pendRevert, reverting)
// commit the coalesced sets
c.revert = newRevert
c.apply = newApply
}
func (c *HeadChangeCoalescer) merge(pend, incoming []*types.TipSet, cancel1, cancel2 map[types.TipSetKey]struct{}) []*types.TipSet {
result := make([]*types.TipSet, 0, len(pend)+len(incoming))
for _, ts := range pend {
_, cancel := cancel1[ts.Key()]
if cancel {
continue
}
_, cancel = cancel2[ts.Key()]
if cancel {
continue
}
result = append(result, ts)
}
for _, ts := range incoming {
_, cancel := cancel1[ts.Key()]
if cancel {
continue
}
_, cancel = cancel2[ts.Key()]
if cancel {
continue
}
result = append(result, ts)
}
return result
}
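
A simplified, self-contained sketch of the cancellation rule the new merge helper implements, using plain strings in place of TipSetKeys (an assumption made purely for illustration):

```go
package main

import "fmt"

// merge mirrors HeadChangeCoalescer.merge: keep pending and incoming entries
// unless they appear in either cancellation set.
func merge(pend, incoming []string, cancel1, cancel2 map[string]struct{}) []string {
	keep := func(k string) bool {
		_, c1 := cancel1[k]
		_, c2 := cancel2[k]
		return !c1 && !c2
	}
	result := make([]string, 0, len(pend)+len(incoming))
	for _, k := range pend {
		if keep(k) {
			result = append(result, k)
		}
	}
	for _, k := range incoming {
		if keep(k) {
			result = append(result, k)
		}
	}
	return result
}

func main() {
	// A pending revert of "A" is cancelled by an incoming apply of "A".
	applying := map[string]struct{}{"A": {}}
	fmt.Println(merge([]string{"A", "B"}, []string{"C"}, applying, map[string]struct{}{}))
	// Output: [B C]
}
```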
func (c *HeadChangeCoalescer) dispatch() {
err := c.notify(c.revert, c.apply)
if err != nil {

View File

@ -52,7 +52,7 @@ func BenchmarkGetRandomness(b *testing.B) {
b.Fatal(err)
}
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain)
if err != nil {
b.Fatal(err)
}
@ -65,7 +65,7 @@ func BenchmarkGetRandomness(b *testing.B) {
}
}()
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
b.Fatal(err)
}

View File

@ -1791,11 +1791,10 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
g, err := syncer.store.GetGenesis()
if err != nil {
if syncer.Genesis == nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
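
The check above rejects headers whose epoch is further ahead than wall-clock time allows. A hedged, standalone sketch of the arithmetic (BlockDelaySecs is 30 on mainnet; the MaxHeightDrift value below is an illustrative assumption):

```go
package main

import (
	"fmt"
	"time"
)

// Illustrative constants: blockDelaySecs matches mainnet; maxHeightDrift is a
// small allowance for clock skew (the exact value here is an assumption).
const (
	blockDelaySecs = 30
	maxHeightDrift = 5
)

func isEpochBeyondCurrMax(epoch uint64, genesisTimestamp uint64) bool {
	now := uint64(time.Now().Unix())
	return epoch > (now-genesisTimestamp)/blockDelaySecs+maxHeightDrift
}

func main() {
	genesis := uint64(time.Now().Add(-time.Hour).Unix()) // pretend genesis was an hour ago

	fmt.Println(isEpochBeyondCurrMax(100, genesis))  // ~120 epochs have elapsed, so: false
	fmt.Println(isEpochBeyondCurrMax(1000, genesis)) // far beyond the current height: true
}
```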

View File

@ -573,11 +573,14 @@ func TestDuplicateNonce(t *testing.T) {
base := tu.g.CurTipset
// Get the banker from computed tipset state, not the parent.
st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
require.NoError(t, err)
ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
require.NoError(t, err)
// Produce a message from the banker to the rcvr
makeMsg := func(rcvr address.Address) *types.SignedMessage {
ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
require.NoError(t, err)
msg := types.Message{
To: rcvr,
From: tu.g.Banker(),

View File

@ -47,41 +47,24 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
}
type BlockHeader struct {
Miner address.Address // 0
Miner address.Address // 0 unique per block/miner
Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset
WinPoStProof []proof2.PoStProof // 4 unique per block/miner
Parents []cid.Cid // 5 identical for all blocks in same tipset
ParentWeight BigInt // 6 identical for all blocks in same tipset
Height abi.ChainEpoch // 7 identical for all blocks in same tipset
ParentStateRoot cid.Cid // 8 identical for all blocks in same tipset
ParentMessageReceipts cid.Cid // 9 identical for all blocks in same tipset
Messages cid.Cid // 10 unique per block
BLSAggregate *crypto.Signature // 11 unique per block: aggregate of BLS messages from above
Timestamp uint64 // 12 identical for all blocks in same tipset / hard-tied to the value of Height above
BlockSig *crypto.Signature // 13 unique per block/miner: miner signature
ForkSignaling uint64 // 14 currently unused/undefined
ParentBaseFee abi.TokenAmount // 15 identical for all blocks in same tipset: the base fee after executing parent tipset
Ticket *Ticket // 1
ElectionProof *ElectionProof // 2
BeaconEntries []BeaconEntry // 3
WinPoStProof []proof2.PoStProof // 4
Parents []cid.Cid // 5
ParentWeight BigInt // 6
Height abi.ChainEpoch // 7
ParentStateRoot cid.Cid // 8
ParentMessageReceipts cid.Cid // 8
Messages cid.Cid // 10
BLSAggregate *crypto.Signature // 11
Timestamp uint64 // 12
BlockSig *crypto.Signature // 13
ForkSignaling uint64 // 14
// ParentBaseFee is the base fee after executing parent tipset
ParentBaseFee abi.TokenAmount // 15
// internal
validated bool // true if the signature has been validated
validated bool // internal, true if the signature has been validated
}
func (blk *BlockHeader) ToStorageBlock() (block.Block, error) {

View File

@ -5,6 +5,7 @@ package types
import (
"fmt"
"io"
"sort"
abi "github.com/filecoin-project/go-state-types/abi"
crypto "github.com/filecoin-project/go-state-types/crypto"
@ -16,6 +17,8 @@ import (
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
var lengthBufBlockHeader = []byte{144}

View File

@ -3,6 +3,10 @@ package vm
import (
"context"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors"
@ -39,6 +43,10 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add
return nil, address.Undef, err
}
if addr == build.ZeroAddress && rt.NetworkVersion() >= network.Version10 {
return nil, address.Undef, aerrors.New(exitcode.ErrIllegalArgument, "cannot create the zero bls actor")
}
addrID, err := rt.state.RegisterNewAddress(addr)
if err != nil {
return nil, address.Undef, aerrors.Escalate(err, "registering actor address")

View File

@ -46,7 +46,7 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma
}
defer lr.Close() // nolint:errcheck
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return xerrors.Errorf("getting metadata datastore: %w", err)
}

View File

@ -324,6 +324,6 @@ var Commands = []*cli.Command{
}
func WithCategory(cat string, cmd *cli.Command) *cli.Command {
cmd.Category = cat
cmd.Category = strings.ToUpper(cat)
return cmd
}

View File

@ -20,10 +20,14 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
"golang.org/x/xerrors"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/chain/store"
"github.com/urfave/cli/v2"
)
var disputeLog = logging.Logger("disputer")
const Confidence = 10
type minerDeadline struct {
@ -165,13 +169,13 @@ var disputerStartCmd = &cli.Command{
startEpoch = abi.ChainEpoch(cctx.Uint64("height"))
}
fmt.Println("checking sync status")
disputeLog.Info("checking sync status")
if err := SyncWait(ctx, api, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
fmt.Println("setting up window post disputer")
disputeLog.Info("setting up window post disputer")
// subscribe to head changes and validate the current value
@ -220,10 +224,10 @@ var disputerStartCmd = &cli.Command{
statusCheckTicker := time.NewTicker(time.Hour)
defer statusCheckTicker.Stop()
fmt.Println("starting up window post disputer")
disputeLog.Info("starting up window post disputer")
applyTsk := func(tsk types.TipSetKey) error {
log.Infof("last checked height: %d", lastEpoch)
disputeLog.Infow("last checked epoch", "epoch", lastEpoch)
dls, ok := deadlineMap[lastEpoch]
delete(deadlineMap, lastEpoch)
if !ok || startEpoch >= lastEpoch {
@ -261,12 +265,12 @@ var disputerStartCmd = &cli.Command{
// TODO: Parallelizeable / can be integrated into the previous deadline-iterating for loop
for _, dpmsg := range dpmsgs {
log.Infof("disputing a PoSt from miner %s", dpmsg.To)
disputeLog.Infow("disputing a PoSt", "miner", dpmsg.To)
m, err := api.MpoolPushMessage(ctx, dpmsg, mss)
if err != nil {
log.Infof("failed to dispute post message: %s", err.Error())
disputeLog.Errorw("failed to dispute post message", "err", err.Error(), "miner", dpmsg.To)
} else {
log.Infof("disputed a PoSt in message: %s", m.Cid())
disputeLog.Infof("submited dispute", "mcid", m.Cid(), "miner", dpmsg.To)
}
}
@ -296,7 +300,7 @@ var disputerStartCmd = &cli.Command{
}
}
case <-statusCheckTicker.C:
log.Infof("Running status check: ")
disputeLog.Infof("running status check")
minerList, err = api.StateListMiners(ctx, types.EmptyTSK)
if err != nil {
@ -321,14 +325,14 @@ var disputerStartCmd = &cli.Command{
// if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever
_, ok := deadlineMap[lastStatusCheckEpoch]
if ok {
log.Infof("epoch %d was skipped during execution, deleting it from deadlineMap")
disputeLog.Infow("epoch skipped during execution, deleting it from deadlineMap", "epoch", lastStatusCheckEpoch)
delete(deadlineMap, lastStatusCheckEpoch)
}
}
log.Infof("Status check complete")
log.Infof("status check complete")
case <-ctx.Done():
return xerrors.Errorf("context cancelled")
return ctx.Err()
}
return nil
@ -336,10 +340,14 @@ var disputerStartCmd = &cli.Command{
for {
err := disputeLoop()
if err != nil {
fmt.Println("disputer shutting down: ", err)
if err == context.Canceled {
disputeLog.Info("disputer shutting down")
break
}
if err != nil {
disputeLog.Errorw("disputer shutting down", "err", err)
return err
}
}
return nil
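
The restructured loop above now distinguishes a cancelled context (clean shutdown) from a real failure. The same contract as a standalone sketch (the helper name and the test values are illustrative only):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// runUntilCancelled mirrors the disputer's outer loop: keep running the work
// function, treat context cancellation as a clean shutdown, and propagate any
// other error.
func runUntilCancelled(loop func() error) error {
	for {
		if err := loop(); err != nil {
			if errors.Is(err, context.Canceled) {
				return nil // clean shutdown
			}
			return err // real failure
		}
	}
}

func main() {
	calls := 0
	err := runUntilCancelled(func() error {
		calls++
		if calls == 3 {
			return context.Canceled // pretend the node is shutting down
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```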

View File

@ -93,7 +93,7 @@ var logSetLevel = &cli.Command{
for _, system := range systems {
if err := api.LogSetLevel(ctx, system, cctx.Args().First()); err != nil {
return xerrors.Errorf("setting log level on %s: %w", system, err)
return xerrors.Errorf("setting log level on %s: %v", system, err)
}
}

View File

@ -935,6 +935,10 @@ var stateComputeStateCmd = &cli.Command{
Name: "compute-state-output",
Usage: "a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes",
},
&cli.BoolFlag{
Name: "no-timing",
Usage: "don't show timing information in html traces",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@ -1026,7 +1030,9 @@ var stateComputeStateCmd = &cli.Command{
return c.Code, nil
}
return ComputeStateHTMLTempl(os.Stdout, ts, stout, getCode)
_, _ = fmt.Fprintln(os.Stderr, "computed state cid: ", stout.Root)
return ComputeStateHTMLTempl(os.Stdout, ts, stout, !cctx.Bool("no-timing"), getCode)
}
fmt.Println("computed state cid: ", stout.Root)
@ -1147,8 +1153,11 @@ var compStateMsg = `
{{if gt (len .Msg.Params) 0}}
<div><pre class="params">{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}</pre></div>
{{end}}
<div><span class="slow-{{IsSlow .Duration}}-{{IsVerySlow .Duration}}">Took {{.Duration}}</span>, <span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
{{if PrintTiming}}
<div><span class="slow-{{IsSlow .Duration}}-{{IsVerySlow .Duration}}">Took {{.Duration}}</span>, <span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
{{else}}
<div><span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
{{end}}
{{if gt (len .MsgRct.Return) 0}}
<div><pre class="ret">{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}</pre></div>
{{end}}
@ -1174,7 +1183,7 @@ var compStateMsg = `
{{range .GasCharges}}
<tr><td>{{.Name}}{{if .Extra}}:{{.Extra}}{{end}}</td>
{{template "gasC" .}}
<td>{{.TimeTaken}}</td>
<td>{{if PrintTiming}}{{.TimeTaken}}{{end}}</td>
<td>
{{ $fImp := FirstImportant .Location }}
{{ if $fImp }}
@ -1213,7 +1222,7 @@ var compStateMsg = `
{{with SumGas .GasCharges}}
<tr class="sum"><td><b>Sum</b></td>
{{template "gasC" .}}
<td>{{.TimeTaken}}</td>
<td>{{if PrintTiming}}{{.TimeTaken}}{{end}}</td>
<td></td></tr>
{{end}}
</table>
@ -1234,19 +1243,20 @@ type compStateHTMLIn struct {
Comp *api.ComputeStateOutput
}
func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error {
func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, printTiming bool, getCode func(addr address.Address) (cid.Cid, error)) error {
t, err := template.New("compute_state").Funcs(map[string]interface{}{
"GetCode": getCode,
"GetMethod": getMethod,
"ToFil": toFil,
"JsonParams": JsonParams,
"JsonReturn": jsonReturn,
"IsSlow": isSlow,
"IsVerySlow": isVerySlow,
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
"SumGas": sumGas,
"CodeStr": codeStr,
"Call": call,
"GetCode": getCode,
"GetMethod": getMethod,
"ToFil": toFil,
"JsonParams": JsonParams,
"JsonReturn": jsonReturn,
"IsSlow": isSlow,
"IsVerySlow": isVerySlow,
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
"SumGas": sumGas,
"CodeStr": codeStr,
"Call": call,
"PrintTiming": func() bool { return printTiming },
"FirstImportant": func(locs []types.Loc) *types.Loc {
if len(locs) != 0 {
for _, l := range locs {
@ -1393,34 +1403,10 @@ var stateWaitMsgCmd = &cli.Command{
return err
}
fmt.Printf("message was executed in tipset: %s\n", mw.TipSet.Cids())
fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode)
fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed)
fmt.Printf("Return: %x\n", mw.Receipt.Return)
if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
return err
}
return nil
return printMsg(ctx, api, msg, mw, m)
},
}
func printReceiptReturn(ctx context.Context, api api.FullNode, m *types.Message, r types.MessageReceipt) error {
act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK)
if err != nil {
return err
}
jret, err := jsonReturn(act.Code, m.Method, r.Return)
if err != nil {
return err
}
fmt.Println(jret)
return nil
}
var stateSearchMsgCmd = &cli.Command{
Name: "search-msg",
Usage: "Search to see whether a message has appeared on chain",
@ -1448,18 +1434,56 @@ var stateSearchMsgCmd = &cli.Command{
return err
}
if mw != nil {
fmt.Printf("message was executed in tipset: %s", mw.TipSet.Cids())
fmt.Printf("\nExit Code: %d", mw.Receipt.ExitCode)
fmt.Printf("\nGas Used: %d", mw.Receipt.GasUsed)
fmt.Printf("\nReturn: %x", mw.Receipt.Return)
} else {
fmt.Print("message was not found on chain")
m, err := api.ChainGetMessage(ctx, msg)
if err != nil {
return err
}
return nil
return printMsg(ctx, api, msg, mw, m)
},
}
func printReceiptReturn(ctx context.Context, api api.FullNode, m *types.Message, r types.MessageReceipt) error {
if len(r.Return) == 0 {
return nil
}
act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK)
if err != nil {
return err
}
jret, err := jsonReturn(act.Code, m.Method, r.Return)
if err != nil {
return err
}
fmt.Println("Decoded return value: ", jret)
return nil
}
func printMsg(ctx context.Context, api api.FullNode, msg cid.Cid, mw *lapi.MsgLookup, m *types.Message) error {
if mw != nil {
if mw.Message != msg {
fmt.Printf("Message was replaced: %s\n", mw.Message)
}
fmt.Printf("Executed in tipset: %s\n", mw.TipSet.Cids())
fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode)
fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed)
fmt.Printf("Return: %x\n", mw.Receipt.Return)
} else {
fmt.Println("message was not found on chain")
// nothing further to decode when the lookup found no receipt
return nil
}
if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
return err
}
return nil
}
var stateCallCmd = &cli.Command{
Name: "call",
Usage: "Invoke a method on an actor locally",

View File

@ -32,7 +32,6 @@ import (
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/genesis"
)
@ -177,8 +176,6 @@ var sealBenchCmd = &cli.Command{
},
},
Action: func(c *cli.Context) error {
policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
if c.Bool("no-gpu") {
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {

View File

@ -146,7 +146,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMarketChanges(ctx, actorChanges[builtin2.StorageMarketActorCodeID]); err != nil {
log.Errorf("Failed to handle market changes: %w", err)
log.Errorf("Failed to handle market changes: %v", err)
return
}
}()
@ -155,7 +155,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMinerChanges(ctx, actorChanges[builtin2.StorageMinerActorCodeID]); err != nil {
log.Errorf("Failed to handle miner changes: %w", err)
log.Errorf("Failed to handle miner changes: %v", err)
return
}
}()
@ -164,7 +164,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleRewardChanges(ctx, actorChanges[builtin2.RewardActorCodeID], nullRounds); err != nil {
log.Errorf("Failed to handle reward changes: %w", err)
log.Errorf("Failed to handle reward changes: %v", err)
return
}
}()
@ -173,7 +173,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandlePowerChanges(ctx, actorChanges[builtin2.StoragePowerActorCodeID]); err != nil {
log.Errorf("Failed to handle power actor changes: %w", err)
log.Errorf("Failed to handle power actor changes: %v", err)
return
}
}()
@ -182,7 +182,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
log.Errorf("Failed to handle message changes: %w", err)
log.Errorf("Failed to handle message changes: %v", err)
return
}
}()
@ -191,7 +191,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
log.Errorf("Failed to handle common actor changes: %w", err)
log.Errorf("Failed to handle common actor changes: %v", err)
return
}
}()

View File

@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"html/template"
"net"
"net/http"
"os"
@ -68,6 +69,10 @@ var runCmd = &cli.Command{
EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"},
Value: "50",
},
&cli.Float64Flag{
Name: "captcha-threshold",
Value: 0.5,
},
},
Action: func(cctx *cli.Context) error {
sendPerRequest, err := types.ParseFIL(cctx.String("amount"))
@ -107,11 +112,13 @@ var runCmd = &cli.Command{
WalletRate: 15 * time.Minute,
WalletBurst: 2,
}),
recapThreshold: cctx.Float64("captcha-threshold"),
}
http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox()))
http.HandleFunc("/send", h.send)
box := rice.MustFindBox("site")
http.Handle("/", http.FileServer(box.HTTPBox()))
http.HandleFunc("/funds.html", prepFundsHtml(box))
http.Handle("/send", h)
fmt.Printf("Open http://%s\n", cctx.String("front"))
go func() {
@ -123,6 +130,17 @@ var runCmd = &cli.Command{
},
}
func prepFundsHtml(box *rice.Box) http.HandlerFunc {
tmpl := template.Must(template.New("funds").Parse(box.MustString("funds.html")))
return func(w http.ResponseWriter, r *http.Request) {
err := tmpl.Execute(w, os.Getenv("RECAPTCHA_SITE_KEY"))
if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway)
return
}
}
}
type handler struct {
ctx context.Context
api api.FullNode
@ -130,15 +148,45 @@ type handler struct {
from address.Address
sendPerRequest types.FIL
limiter *Limiter
limiter *Limiter
recapThreshold float64
}
func (h *handler) send(w http.ResponseWriter, r *http.Request) {
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "only POST is allowed", http.StatusBadRequest)
return
}
reqIP := r.Header.Get("X-Real-IP")
if reqIP == "" {
h, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err)
}
reqIP = h
}
capResp, err := VerifyToken(r.FormValue("g-recaptcha-response"), reqIP)
if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway)
return
}
if !capResp.Success || capResp.Score < h.recapThreshold {
log.Infow("spam", "capResp", capResp)
http.Error(w, "spam protection", http.StatusUnprocessableEntity)
return
}
to, err := address.NewFromString(r.FormValue("address"))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if to == address.Undef {
http.Error(w, "empty address", http.StatusBadRequest)
return
}
// Limit based on wallet address
limiter := h.limiter.GetWalletLimiter(to.String())
@ -148,15 +196,6 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
}
// Limit based on IP
reqIP := r.Header.Get("X-Real-IP")
if reqIP == "" {
h, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err)
}
reqIP = h
}
if i := net.ParseIP(reqIP); i != nil && i.IsLoopback() {
log.Errorf("rate limiting localhost: %s", reqIP)
}

View File

@ -0,0 +1,73 @@
// From https://github.com/lukasaron/recaptcha
// BSD-3 Licensed
// Copyright (c) 2020, Lukas Aron
// Modified by Kubuxu
package main
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"os"
"time"
)
// content type for communication with the verification server.
const (
contentType = "application/json"
)
// VerifyURL defines the endpoint which is called when a token needs to be verified.
var (
VerifyURL, _ = url.Parse("https://www.google.com/recaptcha/api/siteverify")
)
// Response defines the response format from the verification endpoint.
type Response struct {
Success bool `json:"success"` // status of the verification
TimeStamp time.Time `json:"challenge_ts"` // timestamp of the challenge load (ISO format)
HostName string `json:"hostname"` // the hostname of the site where the reCAPTCHA was solved
Score float64 `json:"score"` // the score for this request (0.0 - 1.0)
Action string `json:"action"` // the action name for this request
ErrorCodes []string `json:"error-codes"` // error codes
AndroidPackageName string `json:"apk_package_name"` // android related only
}
// VerifyToken function implements the basic logic of verification of ReCaptcha token that is usually created
// on the user site (front-end) and then sent to verify on the server side (back-end).
// To provide a successful verification process the secret key is required. Based on the security recommendations
// the key has to be passed as an environmental variable SECRET_KEY.
//
// Token parameter is required, however remoteIP is optional.
func VerifyToken(token, remoteIP string) (Response, error) {
resp := Response{}
if len(token) == 0 {
resp.ErrorCodes = []string{"no-token"}
return resp, nil
}
q := url.Values{}
q.Add("secret", os.Getenv("RECAPTCHA_SECRET_KEY"))
q.Add("response", token)
q.Add("remoteip", remoteIP)
var u *url.URL
{
verifyCopy := *VerifyURL
u = &verifyCopy
}
u.RawQuery = q.Encode()
r, err := http.Post(u.String(), contentType, nil)
if err != nil {
return resp, err
}
b, err := ioutil.ReadAll(r.Body)
_ = r.Body.Close() // close immediately after reading finished
if err != nil {
return resp, err
}
return resp, json.Unmarshal(b, &resp)
}
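
A hedged sketch of how a handler in the same package (which already imports net/http) might gate a request on VerifyToken, mirroring the fountain handler above; the function name and threshold wiring here are illustrative, not part of the change:

```go
// checkCaptcha verifies the reCAPTCHA token on an incoming request and writes
// an error response if verification fails or the score is below the threshold.
func checkCaptcha(w http.ResponseWriter, r *http.Request, threshold float64) bool {
	resp, err := VerifyToken(r.FormValue("g-recaptcha-response"), r.Header.Get("X-Real-IP"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return false
	}
	if !resp.Success || resp.Score < threshold {
		http.Error(w, "spam protection", http.StatusUnprocessableEntity)
		return false
	}
	return true
}
```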

View File

@ -3,6 +3,13 @@
<head>
<title>Sending Funds - Lotus Fountain</title>
<link rel="stylesheet" type="text/css" href="main.css">
<script src="https://www.google.com/recaptcha/api.js"></script>
<script>
function onSubmit(token) {
document.getElementById("funds-form").submit();
}
</script>
</head>
<body>
<div class="Index">
@ -11,10 +18,13 @@
[SENDING FUNDS]
</div>
<div class="Index-node">
<form action='/send' method='get'>
<form action='/send' method='post' id='funds-form'>
<span>Enter destination address:</span>
<input type='text' name='address' style="width: 300px">
<button type='submit'>Send Funds</button>
<input type='text' name='address' style="width: 300px">
<button class="g-recaptcha"
data-sitekey="{{ . }}"
data-callback='onSubmit'
data-action='submit'>Send Funds</button>
</form>
</div>
</div>

View File

@ -48,6 +48,7 @@ type gatewayDepsAPI interface {
MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
@ -228,6 +229,14 @@ func (a *GatewayAPI) MsigGetVested(ctx context.Context, addr address.Address, st
return a.api.MsigGetVested(ctx, addr, start, end)
}
func (a *GatewayAPI) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) {
if err := a.checkTipsetKey(ctx, tsk); err != nil {
return nil, err
}
return a.api.MsigGetPending(ctx, addr, tsk)
}
func (a *GatewayAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
if err := a.checkTipsetKey(ctx, tsk); err != nil {
return address.Undef, err

View File

@ -6,7 +6,9 @@ import (
"net/http"
"os"
"contrib.go.opencensus.io/exporter/prometheus"
"github.com/filecoin-project/go-jsonrpc"
promclient "github.com/prometheus/client_golang/prometheus"
"go.opencensus.io/tag"
"github.com/filecoin-project/lotus/build"
@ -99,6 +101,17 @@ var runCmd = &cli.Command{
rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api)))
mux.Handle("/rpc/v0", rpcServer)
registry := promclient.DefaultRegisterer.(*promclient.Registry)
exporter, err := prometheus.NewExporter(prometheus.Options{
Registry: registry,
Namespace: "lotus_gw",
})
if err != nil {
return err
}
mux.Handle("/debug/metrics", exporter)
mux.PathPrefix("/").Handler(http.DefaultServeMux)
/*ah := &auth.Handler{

View File

@ -308,7 +308,7 @@ var runCmd = &cli.Command{
{
// init datastore for r.Exists
_, err := lr.Datastore("/metadata")
_, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@ -327,7 +327,7 @@ var runCmd = &cli.Command{
log.Error("closing repo", err)
}
}()
ds, err := lr.Datastore("/metadata")
ds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}

View File

@ -9,6 +9,11 @@ import (
"strconv"
"strings"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/node/modules/testing"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
@ -32,6 +37,7 @@ var genesisCmd = &cli.Command{
genesisNewCmd,
genesisAddMinerCmd,
genesisAddMsigsCmd,
genesisCarCmd,
},
}
@ -302,3 +308,28 @@ func parseMultisigCsv(csvf string) ([]GenAccountEntry, error) {
return entries, nil
}
var genesisCarCmd = &cli.Command{
Name: "car",
Description: "write genesis car file",
ArgsUsage: "genesis template `FILE`",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "out",
Aliases: []string{"o"},
Value: "genesis.car",
Usage: "write output to `FILE`",
},
},
Action: func(c *cli.Context) error {
if c.Args().Len() != 1 {
return xerrors.Errorf("Please specify a genesis template. (i.e, the one created with `genesis new`)")
}
ofile := c.String("out")
jrnl := journal.NilJournal()
bstor := blockstore.NewMemorySync()
sbldr := vm.Syscalls(ffiwrapper.ProofVerifier)
_, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)()
return err
},
}

View File

@ -175,7 +175,7 @@ var chainBalanceStateCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
@ -188,7 +188,7 @@ var chainBalanceStateCmd = &cli.Command{
}
}()
mds, err := lkrepo.Datastore("/metadata")
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@ -396,7 +396,7 @@ var chainPledgeCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
@ -409,7 +409,7 @@ var chainPledgeCmd = &cli.Command{
}
}()
mds, err := lkrepo.Datastore("/metadata")
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}

View File

@ -17,6 +17,7 @@ import (
var bitFieldCmd = &cli.Command{
Name: "bitfield",
Usage: "Bitfield analyze tool",
Description: "analyze bitfields",
Flags: []cli.Flag{
&cli.StringFlag{
@ -26,53 +27,24 @@ var bitFieldCmd = &cli.Command{
},
},
Subcommands: []*cli.Command{
bitFieldEncodeCmd,
bitFieldDecodeCmd,
bitFieldRunsCmd,
bitFieldStatCmd,
bitFieldDecodeCmd,
bitFieldMergeCmd,
bitFieldIntersectCmd,
bitFieldEncodeCmd,
bitFieldSubCmd,
},
}
var bitFieldRunsCmd = &cli.Command{
Name: "runs",
Usage: "Bitfield bit runs",
Description: "print bit runs in a bitfield",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Action: func(cctx *cli.Context) error {
var val string
if cctx.Args().Present() {
val = cctx.Args().Get(0)
} else {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
val = string(b)
}
var dec []byte
switch cctx.String("enc") {
case "base64":
d, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return fmt.Errorf("decoding base64 value: %w", err)
}
dec = d
case "hex":
d, err := hex.DecodeString(val)
if err != nil {
return fmt.Errorf("decoding hex value: %w", err)
}
dec = d
default:
return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
dec, err := decodeToByte(cctx, 0)
if err != nil {
return err
}
rle, err := rlepluslazy.FromBuf(dec)
@ -98,7 +70,7 @@ var bitFieldRunsCmd = &cli.Command{
s = "FALSE"
}
fmt.Printf("@%d %s * %d\n", idx, s, r.Len)
fmt.Printf("@%08d %s * %d\n", idx, s, r.Len)
idx += r.Len
}
@ -109,43 +81,14 @@ var bitFieldRunsCmd = &cli.Command{
var bitFieldStatCmd = &cli.Command{
Name: "stat",
Usage: "Bitfield stats",
Description: "print bitfield stats",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Action: func(cctx *cli.Context) error {
var val string
if cctx.Args().Present() {
val = cctx.Args().Get(0)
} else {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
val = string(b)
}
var dec []byte
switch cctx.String("enc") {
case "base64":
d, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return fmt.Errorf("decoding base64 value: %w", err)
}
dec = d
case "hex":
d, err := hex.DecodeString(val)
if err != nil {
return fmt.Errorf("decoding hex value: %w", err)
}
dec = d
default:
return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
dec, err := decodeToByte(cctx, 0)
if err != nil {
return err
}
fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
rle, err := rlepluslazy.FromBuf(dec)
if err != nil {
@ -157,10 +100,7 @@ var bitFieldStatCmd = &cli.Command{
return xerrors.Errorf("getting run iterator: %w", err)
}
fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
var ones, zeros, oneRuns, zeroRuns, invalid uint64
for rit.HasNext() {
r, err := rit.NextRun()
if err != nil {
@ -195,14 +135,8 @@ var bitFieldStatCmd = &cli.Command{
var bitFieldDecodeCmd = &cli.Command{
Name: "decode",
Usage: "Bitfield to decimal number",
Description: "decode bitfield and print all numbers in it",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Action: func(cctx *cli.Context) error {
rle, err := decode(cctx, 0)
if err != nil {
@ -219,43 +153,61 @@ var bitFieldDecodeCmd = &cli.Command{
},
}
var bitFieldIntersectCmd = &cli.Command{
Name: "intersect",
Description: "intersect 2 bitfields and print the resulting bitfield as base64",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
var bitFieldMergeCmd = &cli.Command{
Name: "merge",
Usage: "Merge 2 bitfields",
Description: "Merge 2 bitfields and print the resulting bitfield",
Action: func(cctx *cli.Context) error {
a, err := decode(cctx, 0)
if err != nil {
return err
}
b, err := decode(cctx, 1)
if err != nil {
return err
}
o, err := bitfield.MergeBitFields(a, b)
if err != nil {
return xerrors.Errorf("merge: %w", err)
}
str, err := encode(cctx, o)
if err != nil {
return err
}
fmt.Println(str)
return nil
},
}
var bitFieldIntersectCmd = &cli.Command{
Name: "intersect",
Usage: "Intersect 2 bitfields",
Description: "intersect 2 bitfields and print the resulting bitfield",
Action: func(cctx *cli.Context) error {
a, err := decode(cctx, 0)
if err != nil {
return err
}
b, err := decode(cctx, 1)
if err != nil {
return err
}
o, err := bitfield.IntersectBitField(a, b)
if err != nil {
return xerrors.Errorf("intersect: %w", err)
}
s, err := o.RunIterator()
str, err := encode(cctx, o)
if err != nil {
return err
}
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
if err != nil {
return err
}
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
fmt.Println(str)
return nil
},
@ -263,41 +215,29 @@ var bitFieldIntersectCmd = &cli.Command{
var bitFieldSubCmd = &cli.Command{
Name: "sub",
Description: "subtract 2 bitfields and print the resulting bitfield as base64",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Usage: "Subtract 2 bitfields",
Description: "subtract 2 bitfields and print the resulting bitfield",
Action: func(cctx *cli.Context) error {
b, err := decode(cctx, 1)
a, err := decode(cctx, 0)
if err != nil {
return err
}
a, err := decode(cctx, 0)
b, err := decode(cctx, 1)
if err != nil {
return err
}
o, err := bitfield.SubtractBitField(a, b)
if err != nil {
return xerrors.Errorf("intersect: %w", err)
return xerrors.Errorf("subtract: %w", err)
}
s, err := o.RunIterator()
str, err := encode(cctx, o)
if err != nil {
return err
}
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
if err != nil {
return err
}
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
fmt.Println(str)
return nil
},
@ -305,15 +245,9 @@ var bitFieldSubCmd = &cli.Command{
var bitFieldEncodeCmd = &cli.Command{
Name: "encode",
Usage: "Decimal number to bitfield",
Description: "encode a series of decimal numbers into a bitfield",
ArgsUsage: "[infile]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "enc",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Action: func(cctx *cli.Context) error {
f, err := os.Open(cctx.Args().First())
if err != nil {
@ -331,38 +265,64 @@ var bitFieldEncodeCmd = &cli.Command{
out.Set(i)
}
s, err := out.RunIterator()
str, err := encode(cctx, out)
if err != nil {
return err
}
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
if err != nil {
return err
}
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
fmt.Println(str)
return nil
},
}
func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
func encode(cctx *cli.Context, field bitfield.BitField) (string, error) {
s, err := field.RunIterator()
if err != nil {
return "", err
}
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
if err != nil {
return "", err
}
var str string
switch cctx.String("enc") {
case "base64":
str = base64.StdEncoding.EncodeToString(bytes)
case "hex":
str = hex.EncodeToString(bytes)
default:
return "", fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
}
return str, nil
}
func decode(cctx *cli.Context, i int) (bitfield.BitField, error) {
b, err := decodeToByte(cctx, i)
if err != nil {
return bitfield.BitField{}, err
}
return bitfield.NewFromBytes(b)
}
func decodeToByte(cctx *cli.Context, i int) ([]byte, error) {
var val string
if cctx.Args().Present() {
if a >= cctx.NArg() {
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
if i >= cctx.NArg() {
return nil, xerrors.Errorf("need more than %d args", i)
}
val = cctx.Args().Get(a)
val = cctx.Args().Get(i)
} else {
if a > 0 {
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
if i > 0 {
return nil, xerrors.Errorf("need more than %d args", i)
}
b, err := ioutil.ReadAll(os.Stdin)
r, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return bitfield.BitField{}, err
return nil, err
}
val = string(b)
val = string(r)
}
var dec []byte
@ -370,18 +330,18 @@ func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
case "base64":
d, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err)
return nil, fmt.Errorf("decoding base64 value: %w", err)
}
dec = d
case "hex":
d, err := hex.DecodeString(val)
if err != nil {
return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err)
return nil, fmt.Errorf("decoding hex value: %w", err)
}
dec = d
default:
return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
return nil, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
}
return bitfield.NewFromBytes(dec)
return dec, nil
}
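
A standalone, hedged sketch of the round trip these helpers perform (encode a set of bits to the RLE+/base64 form the tool prints, then decode it back); the import paths are the ones Lotus already uses for go-bitfield:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

func main() {
	// Build a bitfield from a few sector numbers.
	bf := bitfield.NewFromSet([]uint64{1, 2, 3, 10})

	// Encode: run iterator -> RLE+ bytes -> base64 (what `encode` prints).
	runs, err := bf.RunIterator()
	if err != nil {
		panic(err)
	}
	enc, err := rlepluslazy.EncodeRuns(runs, []byte{})
	if err != nil {
		panic(err)
	}
	s := base64.StdEncoding.EncodeToString(enc)
	fmt.Println(s)

	// Decode: base64 -> bytes -> bitfield (what `decode` does), then count bits.
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		panic(err)
	}
	back, err := bitfield.NewFromBytes(raw)
	if err != nil {
		panic(err)
	}
	n, err := back.Count()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 4
}
```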

View File

@ -2,6 +2,7 @@ package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
@ -75,7 +76,7 @@ var datastoreListCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
if err != nil {
return err
}
@ -141,7 +142,7 @@ var datastoreGetCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
if err != nil {
return err
}

View File

@ -72,7 +72,7 @@ var exportChainCmd = &cli.Command{
defer fi.Close() //nolint:errcheck
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
@ -85,7 +85,7 @@ var exportChainCmd = &cli.Command{
}
}()
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package main
import (
"context"
"encoding/hex"
"fmt"
"io"
@ -24,6 +25,8 @@ var importCarCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
ctx := context.TODO()
exists, err := r.Exists()
if err != nil {
return err
@ -44,7 +47,7 @@ var importCarCmd = &cli.Command{
return xerrors.Errorf("opening the car file: %w", err)
}
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return err
}
@ -99,6 +102,8 @@ var importObjectCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
ctx := context.TODO()
exists, err := r.Exists()
if err != nil {
return err
@ -113,7 +118,7 @@ var importObjectCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}

View File

@ -168,7 +168,7 @@ var jwtNewCmd = &cli.Command{
defer func() {
if err := file.Close(); err != nil {
log.Warnf("failed to close output file: %w", err)
log.Warnf("failed to close output file: %v", err)
}
}()

View File

@ -427,7 +427,7 @@ var keyinfoNewCmd = &cli.Command{
defer func() {
if err := file.Close(); err != nil {
log.Warnf("failed to close output file: %w", err)
log.Warnf("failed to close output file: %v", err)
}
}()

View File

@ -131,7 +131,7 @@ var stateTreePruneCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
@ -151,7 +151,7 @@ var stateTreePruneCmd = &cli.Command{
return fmt.Errorf("only badger blockstores are supported")
}
mds, err := lkrepo.Datastore("/metadata")
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}

View File

@ -24,9 +24,18 @@ import (
var rpcCmd = &cli.Command{
Name: "rpc",
Usage: "Interactive JsonPRC shell",
// TODO: flag for miner/worker
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "miner",
},
},
Action: func(cctx *cli.Context) error {
addr, headers, err := lcli.GetRawAPI(cctx, repo.FullNode)
rt := repo.FullNode
if cctx.Bool("miner") {
rt = repo.StorageMiner
}
addr, headers, err := lcli.GetRawAPI(cctx, rt)
if err != nil {
return err
}

View File

@ -311,7 +311,8 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
PieceCID: commD,
},
DealInfo: &sealing.DealInfo{
DealID: dealID,
DealID: dealID,
DealProposal: &sector.Deal,
DealSchedule: sealing.DealSchedule{
StartEpoch: sector.Deal.StartEpoch,
EndEpoch: sector.Deal.EndEpoch,
@ -416,7 +417,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
return xerrors.Errorf("peer ID from private key: %w", err)
}
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package main
import (
"context"
"encoding/json"
"io/ioutil"
"os"
@ -190,7 +191,7 @@ var initRestoreCmd = &cli.Command{
log.Info("Restoring metadata backup")
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}

View File

@ -15,7 +15,6 @@ import (
tm "github.com/buger/goterm"
"github.com/docker/go-units"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer"
@ -23,6 +22,8 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
@ -341,6 +342,7 @@ var storageDealsCmd = &cli.Command{
getBlocklistCmd,
resetBlocklistCmd,
setSealDurationCmd,
dealsPendingPublish,
},
}
@ -825,3 +827,57 @@ var transfersListCmd = &cli.Command{
return nil
},
}
var dealsPendingPublish = &cli.Command{
Name: "pending-publish",
Usage: "list deals waiting in publish queue",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "publish-now",
Usage: "send a publish message now",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Bool("publish-now") {
if err := api.MarketPublishPendingDeals(ctx); err != nil {
return xerrors.Errorf("publishing deals: %w", err)
}
fmt.Println("triggered deal publishing")
return nil
}
pending, err := api.MarketPendingDeals(ctx)
if err != nil {
return xerrors.Errorf("getting pending deals: %w", err)
}
if len(pending.Deals) > 0 {
endsIn := pending.PublishPeriodStart.Add(pending.PublishPeriod).Sub(time.Now())
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "Publish period: %s (ends in %s)\n", pending.PublishPeriod, endsIn.Round(time.Second))
_, _ = fmt.Fprintf(w, "First deal queued at: %s\n", pending.PublishPeriodStart)
_, _ = fmt.Fprintf(w, "Deals will be published at: %s\n", pending.PublishPeriodStart.Add(pending.PublishPeriod))
_, _ = fmt.Fprintf(w, "%d deals queued to be published:\n", len(pending.Deals))
_, _ = fmt.Fprintf(w, "ProposalCID\tClient\tSize\n")
for _, deal := range pending.Deals {
proposalNd, err := cborutil.AsIpld(&deal) // nolint
if err != nil {
return err
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", proposalNd.Cid(), deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)))
}
return w.Flush()
}
fmt.Println("No deals queued to be published")
return nil
},
}

View File

@ -249,6 +249,7 @@ var sectorsListCmd = &cli.Command{
tablewriter.Col("Events"),
tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"),
tablewriter.Col("VerifiedPower"),
tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("RecoveryTimeout"))
@ -268,9 +269,11 @@ var sectorsListCmd = &cli.Command{
_, inSSet := commitedIDs[s]
_, inASet := activeIDs[s]
dw := .0
dw, vp := .0, .0
if st.Expiration-st.Activation > 0 {
dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(9)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
}
var deals int
@ -309,6 +312,9 @@ var sectorsListCmd = &cli.Command{
if !fast && deals > 0 {
m["DealWeight"] = units.BytesSize(dw)
if vp > 0 {
m["VerifiedPower"] = color.GreenString(units.BytesSize(vp))
}
}
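
For the new DealWeight/VerifiedPower columns: on-chain deal weights are byte·epochs, so dividing by the sector lifetime (Expiration − Activation) recovers an average byte figure; the verified weight is additionally scaled by 9 before averaging (the factor is taken from the code above, while its link to the verified-deal quality multiplier is an assumption). A hedged arithmetic sketch with math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Assumed illustrative numbers: a 32 GiB verified deal filling a sector
	// that lives for 1000 epochs. On chain, deal weight is byte·epochs.
	size := big.NewInt(32 << 30)
	lifetime := big.NewInt(1000)

	verifiedDealWeight := new(big.Int).Mul(size, lifetime) // byte·epochs
	dealWeight := big.NewInt(0)                            // no unverified deals in this sketch

	// Averaging back over the lifetime recovers the byte figure the table shows.
	rdw := new(big.Int).Add(dealWeight, verifiedDealWeight)
	dw := new(big.Int).Div(rdw, lifetime)
	vp := new(big.Int).Div(new(big.Int).Mul(verifiedDealWeight, big.NewInt(9)), lifetime)

	fmt.Println(dw) // 34359738368  (32 GiB)
	fmt.Println(vp) // 309237645312 (9 × 32 GiB)
}
```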
if st.Early > 0 {
@ -649,18 +655,45 @@ var sectorsCapacityCollateralCmd = &cli.Command{
return err
}
mi, err := nApi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
nv, err := nApi.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return err
}
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType)
if err != nil {
return err
}
pci := miner.SectorPreCommitInfo{
SealProof: spt,
Expiration: abi.ChainEpoch(cctx.Uint64("expiration")),
}
if pci.Expiration == 0 {
pci.Expiration = policy.GetMaxSectorExpirationExtension()
h, err := nApi.ChainHead(ctx)
if err != nil {
return err
}
pci.Expiration = policy.GetMaxSectorExpirationExtension() + h.Height()
}
pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK)
if err != nil {
return err
}
fmt.Printf("Estimated collateral: %s\n", types.FIL(pc))
pcd, err := nApi.StateMinerPreCommitDepositForPower(ctx, maddr, pci, types.EmptyTSK)
if err != nil {
return err
}
fmt.Printf("Estimated collateral: %s\n", types.FIL(big.Max(pc, pcd)))
return nil
},

View File

@ -0,0 +1,244 @@
package main
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
gobig "math/big"
"strings"
"sync"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
type InteractiveWallet struct {
lk sync.Mutex
apiGetter func() (api.FullNode, jsonrpc.ClientCloser, error)
under api.WalletAPI
}
func (c *InteractiveWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
err := c.accept(func() error {
fmt.Println("-----")
fmt.Println("ACTION: WalletNew - Creating new wallet")
fmt.Printf("TYPE: %s\n", typ)
return nil
})
if err != nil {
return address.Address{}, err
}
return c.under.WalletNew(ctx, typ)
}
func (c *InteractiveWallet) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
return c.under.WalletHas(ctx, addr)
}
func (c *InteractiveWallet) WalletList(ctx context.Context) ([]address.Address, error) {
return c.under.WalletList(ctx)
}
func (c *InteractiveWallet) WalletSign(ctx context.Context, k address.Address, msg []byte, meta api.MsgMeta) (*crypto.Signature, error) {
err := c.accept(func() error {
fmt.Println("-----")
fmt.Println("ACTION: WalletSign - Sign a message/deal")
fmt.Printf("ADDRESS: %s\n", k)
fmt.Printf("TYPE: %s\n", meta.Type)
switch meta.Type {
case api.MTChainMsg:
var cmsg types.Message
if err := cmsg.UnmarshalCBOR(bytes.NewReader(meta.Extra)); err != nil {
return xerrors.Errorf("unmarshalling message: %w", err)
}
_, bc, err := cid.CidFromBytes(msg)
if err != nil {
return xerrors.Errorf("getting cid from signing bytes: %w", err)
}
if !cmsg.Cid().Equals(bc) {
return xerrors.Errorf("cid(meta.Extra).bytes() != msg")
}
jb, err := json.MarshalIndent(&cmsg, "", " ")
if err != nil {
return xerrors.Errorf("json-marshaling the message: %w", err)
}
fmt.Println("Message JSON:", string(jb))
fmt.Println("Value:", types.FIL(cmsg.Value))
fmt.Println("Max Fees:", types.FIL(cmsg.RequiredFunds()))
fmt.Println("Max Total Cost:", types.FIL(big.Add(cmsg.RequiredFunds(), cmsg.Value)))
if c.apiGetter != nil {
napi, closer, err := c.apiGetter()
if err != nil {
return xerrors.Errorf("getting node api: %w", err)
}
defer closer()
toact, err := napi.StateGetActor(ctx, cmsg.To, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("looking up dest actor: %w", err)
}
fmt.Println("Method:", stmgr.MethodsMap[toact.Code][cmsg.Method].Name)
p, err := lcli.JsonParams(toact.Code, cmsg.Method, cmsg.Params)
if err != nil {
return err
}
fmt.Println("Params:", p)
if builtin.IsMultisigActor(toact.Code) && cmsg.Method == multisig.Methods.Propose {
var mp multisig.ProposeParams
if err := mp.UnmarshalCBOR(bytes.NewReader(cmsg.Params)); err != nil {
return xerrors.Errorf("unmarshalling multisig propose params: %w", err)
}
fmt.Println("\tMultiSig Proposal Value:", types.FIL(mp.Value))
fmt.Println("\tMultiSig Proposal Hex Params:", hex.EncodeToString(mp.Params))
toact, err := napi.StateGetActor(ctx, mp.To, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("looking up msig dest actor: %w", err)
}
fmt.Println("\tMultiSig Proposal Method:", stmgr.MethodsMap[toact.Code][mp.Method].Name)
p, err := lcli.JsonParams(toact.Code, mp.Method, mp.Params)
if err != nil {
return err
}
fmt.Println("\tMultiSig Proposal Params:", strings.ReplaceAll(p, "\n", "\n\t"))
}
} else {
fmt.Println("Params: No chain node connection, can't decode params")
}
case api.MTDealProposal:
return xerrors.Errorf("TODO") // TODO
default:
log.Infow("WalletSign", "address", k, "type", meta.Type)
}
return nil
})
if err != nil {
return nil, err
}
return c.under.WalletSign(ctx, k, msg, meta)
}
func (c *InteractiveWallet) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) {
err := c.accept(func() error {
fmt.Println("-----")
fmt.Println("ACTION: WalletExport - Export private key")
fmt.Printf("ADDRESS: %s\n", a)
return nil
})
if err != nil {
return nil, err
}
return c.under.WalletExport(ctx, a)
}
func (c *InteractiveWallet) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) {
err := c.accept(func() error {
fmt.Println("-----")
fmt.Println("ACTION: WalletImport - Import private key")
fmt.Printf("TYPE: %s\n", ki.Type)
return nil
})
if err != nil {
return address.Undef, err
}
return c.under.WalletImport(ctx, ki)
}
func (c *InteractiveWallet) WalletDelete(ctx context.Context, addr address.Address) error {
err := c.accept(func() error {
fmt.Println("-----")
fmt.Println("ACTION: WalletDelete - Delete a private key")
fmt.Printf("ADDRESS: %s\n", addr)
return nil
})
if err != nil {
return err
}
return c.under.WalletDelete(ctx, addr)
}
func (c *InteractiveWallet) accept(prompt func() error) error {
c.lk.Lock()
defer c.lk.Unlock()
if err := prompt(); err != nil {
return err
}
yes := randomYes()
for {
fmt.Printf("\nAccept the above? (%s/No): ", yes)
var a string
if _, err := fmt.Scanln(&a); err != nil {
return err
}
switch a {
case yes:
fmt.Println("approved")
return nil
case "No":
return xerrors.Errorf("action rejected")
}
fmt.Printf("Type EXACTLY '%s' or 'No'\n", yes)
}
}
var yeses = []string{
"yes",
"Yes",
"YES",
"approve",
"Approve",
"accept",
"Accept",
"authorize",
"Authorize",
"confirm",
"Confirm",
}
func randomYes() string {
i, err := rand.Int(rand.Reader, gobig.NewInt(int64(len(yeses))))
if err != nil {
panic(err)
}
return yeses[i.Int64()]
}

View File

@ -45,6 +45,12 @@ func main() {
EnvVars: []string{"WALLET_PATH"},
Value: "~/.lotuswallet", // TODO: Consider XDG_DATA_HOME
},
&cli.StringFlag{
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus",
},
},
Commands: local,
@ -70,6 +76,14 @@ var runCmd = &cli.Command{
Name: "ledger",
Usage: "use a ledger device instead of an on-disk wallet",
},
&cli.BoolFlag{
Name: "interactive",
Usage: "prompt before performing actions (DO NOT USE FOR MINER WORKER ADDRESS)",
},
&cli.BoolFlag{
Name: "offline",
Usage: "don't query chain state in interactive mode",
},
},
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus wallet")
@ -118,7 +132,7 @@ var runCmd = &cli.Command{
var w api.WalletAPI = lw
if cctx.Bool("ledger") {
ds, err := lr.Datastore("/metadata")
ds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@ -134,8 +148,25 @@ var runCmd = &cli.Command{
log.Info("Setting up API endpoint at " + address)
if cctx.Bool("interactive") {
var ag func() (api.FullNode, jsonrpc.ClientCloser, error)
if !cctx.Bool("offline") {
ag = func() (api.FullNode, jsonrpc.ClientCloser, error) {
return lcli.GetFullNodeAPI(cctx)
}
}
w = &InteractiveWallet{
under: w,
apiGetter: ag,
}
} else {
w = &LoggedWallet{under: w}
}
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Filecoin", &LoggedWallet{under: metrics.MetricedWalletAPI(w)})
rpcServer.Register("Filecoin", metrics.MetricedWalletAPI(w))
mux.Handle("/rpc/v0", rpcServer)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

View File

@ -1,6 +1,7 @@
package main
import (
"context"
"os"
dstore "github.com/ipfs/go-datastore"
@ -87,7 +88,7 @@ func restore(cctx *cli.Context, r repo.Repo) error {
log.Info("Restoring metadata backup")
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}

View File

@ -254,7 +254,7 @@ var DaemonCmd = &cli.Command{
issnapshot = true
}
if err := ImportChain(r, chainfile, issnapshot); err != nil {
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
@ -389,7 +389,7 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return nil
}
func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
@ -432,12 +432,12 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(repo.BlockstoreChain)
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore("/metadata")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}
@ -473,7 +473,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(context.TODO(), 0, ts, true)
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
@ -487,13 +487,13 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
if !snapshot {
log.Infof("validating imported chain...")
if err := stm.ValidateChain(context.TODO(), ts); err != nil {
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(context.Background(), ts); err != nil {
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}

View File

@ -5,6 +5,7 @@ package chaos
import (
"fmt"
"io"
"sort"
address "github.com/filecoin-project/go-address"
abi "github.com/filecoin-project/go-state-types/abi"
@ -15,6 +16,8 @@ import (
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
var lengthBufState = []byte{130}

View File

@ -48,6 +48,8 @@
* [MarketListDeals](#MarketListDeals)
* [MarketListIncompleteDeals](#MarketListIncompleteDeals)
* [MarketListRetrievalDeals](#MarketListRetrievalDeals)
* [MarketPendingDeals](#MarketPendingDeals)
* [MarketPublishPendingDeals](#MarketPublishPendingDeals)
* [MarketRestartDataTransfer](#MarketRestartDataTransfer)
* [MarketSetAsk](#MarketSetAsk)
* [MarketSetRetrievalAsk](#MarketSetRetrievalAsk)
@ -168,7 +170,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 65536,
"APIVersion": 65792,
"BlockDelay": 42
}
```
@ -524,10 +526,10 @@ Response: `{}`
### MarketCancelDataTransfer
ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
Perms: read
Perms: write
Inputs:
```json
@ -641,7 +643,8 @@ Response:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
"PieceSize": 1024
"PieceSize": 1024,
"RawBlockSize": 42
},
"AvailableForRetrieval": true,
"DealID": 5432,
@ -725,11 +728,36 @@ Inputs: `null`
Response: `null`
### MarketPendingDeals
There are not yet any comments for this method.
Perms: write
Inputs: `null`
Response:
```json
{
"Deals": null,
"PublishPeriodStart": "0001-01-01T00:00:00Z",
"PublishPeriod": 60000000000
}
```
### MarketPublishPendingDeals
There are not yet any comments for this method.
Perms: admin
Inputs: `null`
Response: `{}`
### MarketRestartDataTransfer
MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
Perms: read
Perms: write
Inputs:
```json


@ -146,7 +146,7 @@ Perms: admin
Inputs: `null`
Response: `65536`
Response: `65792`
## Add


@ -99,6 +99,7 @@
* [MsigCancel](#MsigCancel)
* [MsigCreate](#MsigCreate)
* [MsigGetAvailableBalance](#MsigGetAvailableBalance)
* [MsigGetPending](#MsigGetPending)
* [MsigGetVested](#MsigGetVested)
* [MsigGetVestingSchedule](#MsigGetVestingSchedule)
* [MsigPropose](#MsigPropose)
@ -253,7 +254,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 65536,
"APIVersion": 65792,
"BlockDelay": 42
}
```
@ -1027,7 +1028,8 @@ Response:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
"PieceSize": 1024
"PieceSize": 1024,
"RawBlockSize": 42
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@ -1097,7 +1099,8 @@ Response:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
"PieceSize": 1024
"PieceSize": 1024,
"RawBlockSize": 42
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@ -1415,7 +1418,8 @@ Inputs:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
"PieceSize": 1024
"PieceSize": 1024,
"RawBlockSize": 42
},
"Wallet": "f01234",
"Miner": "f01234",
@ -2445,6 +2449,31 @@ Inputs:
Response: `"0"`
### MsigGetPending
MsigGetPending returns pending transactions for the given multisig
wallet. Once pending transactions are fully approved, they will no longer
appear here.
Perms: read
Inputs:
```json
[
"f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `null`
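A minimal sketch of calling this from Go, assuming a connected `FullNode` API client; `types.EmptyTSK` selects the heaviest tipset:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// listPendingMsigTxns prints the not-yet-fully-approved transactions of a
// multisig wallet at the current chain head.
func listPendingMsigTxns(ctx context.Context, node lapi.FullNode, msig address.Address) error {
	pending, err := node.MsigGetPending(ctx, msig, types.EmptyTSK)
	if err != nil {
		return err
	}
	for _, txn := range pending {
		fmt.Printf("pending msig txn: %+v\n", txn)
	}
	return nil
}
```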
### MsigGetVested
MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
It takes the following params: <multisig address>, <start epoch>, <end epoch>
@ -3283,7 +3312,7 @@ Response:
## State
The State methods are used to query, inspect, and interact with chain state.
Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.
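A short sketch of that convention, assuming a connected `FullNode` API client and using `StateGetActor` as a representative State method; `types.EmptyTSK` stands in for the nil TipSetKey:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// showActor looks up an actor at the heaviest tipset (EmptyTSK), i.e. in that
// tipset's parent state per the convention described above.
func showActor(ctx context.Context, node lapi.FullNode, addr address.Address) error {
	act, err := node.StateGetActor(ctx, addr, types.EmptyTSK)
	if err != nil {
		return err
	}
	fmt.Printf("actor %s: balance %s, nonce %d\n", addr, act.Balance, act.Nonce)
	return nil
}
```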
@ -3336,6 +3365,10 @@ Response: `null`
### StateCall
StateCall runs the given message and returns its result without any persisted changes.
StateCall applies the message to the tipset's parent state. The
message is not applied on top of the messages in the passed-in
tipset.
Perms: read
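The distinction matters when simulating a call at a particular tipset: the message sees the state that tipset was built on, not the state after the tipset's own messages execute. A rough dry-run sketch, assuming a connected `FullNode` client and leaving gas fields at zero (which the node is expected to default for a simulation):

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// dryRunSend simulates a simple value transfer against the parent state of the
// heaviest tipset; nothing is persisted or broadcast.
func dryRunSend(ctx context.Context, node lapi.FullNode, from, to address.Address) error {
	msg := &types.Message{
		From:  from,
		To:    to,
		Value: types.NewInt(1),
	}
	res, err := node.StateCall(ctx, msg, types.EmptyTSK)
	if err != nil {
		return err
	}
	fmt.Printf("exit code: %s, gas used: %d\n", res.MsgRct.ExitCode, res.MsgRct.GasUsed)
	return nil
}
```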
@ -4375,6 +4408,9 @@ Response:
```json
{
"Balance": "0",
"Code": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"State": {}
}
```

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit 62f89f108a6a8fe9ad6ed52fb7ffbf8594d7ae5c
Subproject commit b6e0b35fb49ed0fedb6a6a473b222e3b8a7d7f17


@ -5,13 +5,17 @@ package sectorstorage
import (
"fmt"
"io"
"sort"
sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
func (t *Call) MarshalCBOR(w io.Writer) error {
if t == nil {
@ -188,7 +192,8 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -444,7 +449,8 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -566,7 +572,8 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}


@ -236,7 +236,12 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storage.SectorRef, ti
}
func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
var out [1920]byte
plen, err := sid.ProofType.ProofSize()
if err != nil {
return nil, err
}
out := make([]byte, plen)
for i := range out[:len(phase1Out)] {
out[i] = phase1Out[i] ^ byte(sid.ID.Number&0xff)
}
@ -464,7 +469,12 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
}
func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
if len(svi.Proof) != 1920 {
plen, err := svi.SealProof.ProofSize()
if err != nil {
return false, err
}
if len(svi.Proof) != int(plen) {
return false, nil
}


@ -232,7 +232,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto
d := Decl{s, fileType}
if len(i.sectors[d]) == 0 {
return nil
continue
}
rewritten := make([]*declMeta, 0, len(i.sectors[d])-1)
@ -245,7 +245,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto
}
if len(rewritten) == 0 {
delete(i.sectors, d)
return nil
continue
}
i.sectors[d] = rewritten


@ -5,12 +5,16 @@ package storiface
import (
"fmt"
"io"
"sort"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
func (t *CallID) MarshalCBOR(w io.Writer) error {
if t == nil {
@ -134,7 +138,8 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}


@ -5,14 +5,19 @@ package sealing
import (
"fmt"
"io"
"sort"
abi "github.com/filecoin-project/go-state-types/abi"
market "github.com/filecoin-project/specs-actors/actors/builtin/market"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = sort.Sort
func (t *Piece) MarshalCBOR(w io.Writer) error {
if t == nil {
@ -124,7 +129,8 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -135,7 +141,7 @@ func (t *DealInfo) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{164}); err != nil {
if _, err := w.Write([]byte{165}); err != nil {
return err
}
@ -179,6 +185,22 @@ func (t *DealInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
return err
}
if err := t.DealProposal.MarshalCBOR(w); err != nil {
return err
}
// t.DealSchedule (sealing.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
@ -283,6 +305,26 @@ func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
}
t.DealID = abi.DealID(extra)
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.DealProposal = new(market.DealProposal)
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
}
}
}
// t.DealSchedule (sealing.DealSchedule) (struct)
case "DealSchedule":
@ -314,7 +356,8 @@ func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -464,7 +507,8 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -1576,7 +1620,8 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@ -1763,7 +1808,8 @@ func (t *Log) UnmarshalCBOR(r io.Reader) error {
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}


@ -53,7 +53,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
continue
}
proposal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, tok)
proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok)
if err != nil {
return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
}


@ -0,0 +1,209 @@
package sealing
import (
"bytes"
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
type CurrentDealInfoAPI interface {
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
StateLookupID(context.Context, address.Address, TipSetToken) (address.Address, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error)
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
}
type CurrentDealInfo struct {
DealID abi.DealID
MarketDeal *api.MarketDeal
PublishMsgTipSet TipSetToken
}
type CurrentDealInfoManager struct {
CDAPI CurrentDealInfoAPI
}
// GetCurrentDealInfo gets the current deal state and deal ID.
// Note that the deal ID is assigned when the deal is published, so it may
// have changed if there was a reorg after the deal was published.
func (mgr *CurrentDealInfoManager) GetCurrentDealInfo(ctx context.Context, tok TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (CurrentDealInfo, error) {
// Lookup the deal ID by comparing the deal proposal to the proposals in
// the publish deals message, and indexing into the message return value
dealID, pubMsgTok, err := mgr.dealIDFromPublishDealsMsg(ctx, tok, proposal, publishCid)
if err != nil {
return CurrentDealInfo{}, err
}
// Lookup the deal state by deal ID
marketDeal, err := mgr.CDAPI.StateMarketStorageDeal(ctx, dealID, tok)
if err == nil && proposal != nil {
// Make sure the retrieved deal proposal matches the target proposal
equal, err := mgr.CheckDealEquality(ctx, tok, *proposal, marketDeal.Proposal)
if err != nil {
return CurrentDealInfo{}, err
}
if !equal {
return CurrentDealInfo{}, xerrors.Errorf("Deal proposals for publish message %s did not match", publishCid)
}
}
return CurrentDealInfo{DealID: dealID, MarketDeal: marketDeal, PublishMsgTipSet: pubMsgTok}, err
}
// dealIDFromPublishDealsMsg looks up the publish deals message by cid, and finds the deal ID
// by looking at the message return value
func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context, tok TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (abi.DealID, TipSetToken, error) {
dealID := abi.DealID(0)
// Get the return value of the publish deals message
lookup, err := mgr.CDAPI.StateSearchMsg(ctx, publishCid)
if err != nil {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err)
}
if lookup.Receipt.ExitCode != exitcode.Ok {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode)
}
var retval market.PublishStorageDealsReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: unmarshalling message return: %w", publishCid, err)
}
// Previously, publish deals messages contained a single deal, and the
// deal proposal was not included in the sealing deal info.
// So check if the proposal is nil and check the number of deals published
// in the message.
if proposal == nil {
if len(retval.IDs) > 1 {
return dealID, nil, xerrors.Errorf(
"getting deal ID from publish deal message %s: "+
"no deal proposal supplied but message return value has more than one deal (%d deals)",
publishCid, len(retval.IDs))
}
// There is a single deal in this publish message and no deal proposal
// was supplied, so we have nothing to compare against. Just assume
// the deal ID is correct.
return retval.IDs[0], lookup.TipSetTok, nil
}
// Get the parameters to the publish deals message
pubmsg, err := mgr.CDAPI.ChainGetMessage(ctx, publishCid)
if err != nil {
return dealID, nil, xerrors.Errorf("getting publish deal message %s: %w", publishCid, err)
}
var pubDealsParams market2.PublishStorageDealsParams
if err := pubDealsParams.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil {
return dealID, nil, xerrors.Errorf("unmarshalling publish deal message params for message %s: %w", publishCid, err)
}
// Scan through the deal proposals in the message parameters to find the
// index of the target deal proposal
dealIdx := -1
for i, paramDeal := range pubDealsParams.Deals {
eq, err := mgr.CheckDealEquality(ctx, tok, *proposal, market.DealProposal(paramDeal.Proposal))
if err != nil {
return dealID, nil, xerrors.Errorf("comparing publish deal message %s proposal to deal proposal: %w", publishCid, err)
}
if eq {
dealIdx = i
break
}
}
if dealIdx == -1 {
return dealID, nil, xerrors.Errorf("could not find deal in publish deals message %s", publishCid)
}
if dealIdx >= len(retval.IDs) {
return dealID, nil, xerrors.Errorf(
"deal index %d out of bounds of deals (len %d) in publish deals message %s",
dealIdx, len(retval.IDs), publishCid)
}
return retval.IDs[dealIdx], lookup.TipSetTok, nil
}
func (mgr *CurrentDealInfoManager) CheckDealEquality(ctx context.Context, tok TipSetToken, p1, p2 market.DealProposal) (bool, error) {
p1ClientID, err := mgr.CDAPI.StateLookupID(ctx, p1.Client, tok)
if err != nil {
return false, err
}
p2ClientID, err := mgr.CDAPI.StateLookupID(ctx, p2.Client, tok)
if err != nil {
return false, err
}
return p1.PieceCID.Equals(p2.PieceCID) &&
p1.PieceSize == p2.PieceSize &&
p1.VerifiedDeal == p2.VerifiedDeal &&
p1.Label == p2.Label &&
p1.StartEpoch == p2.StartEpoch &&
p1.EndEpoch == p2.EndEpoch &&
p1.StoragePricePerEpoch.Equals(p2.StoragePricePerEpoch) &&
p1.ProviderCollateral.Equals(p2.ProviderCollateral) &&
p1.ClientCollateral.Equals(p2.ClientCollateral) &&
p1.Provider == p2.Provider &&
p1ClientID == p2ClientID, nil
}
type CurrentDealInfoTskAPI interface {
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error)
StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error)
}
type CurrentDealInfoAPIAdapter struct {
CurrentDealInfoTskAPI
}
func (c *CurrentDealInfoAPIAdapter) StateLookupID(ctx context.Context, a address.Address, tok TipSetToken) (address.Address, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
}
return c.CurrentDealInfoTskAPI.StateLookupID(ctx, a, tsk)
}
func (c *CurrentDealInfoAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok TipSetToken) (*api.MarketDeal, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
}
return c.CurrentDealInfoTskAPI.StateMarketStorageDeal(ctx, dealID, tsk)
}
func (c *CurrentDealInfoAPIAdapter) StateSearchMsg(ctx context.Context, k cid.Cid) (*MsgLookup, error) {
wmsg, err := c.CurrentDealInfoTskAPI.StateSearchMsg(ctx, k)
if err != nil {
return nil, err
}
if wmsg == nil {
return nil, nil
}
return &MsgLookup{
Receipt: MessageReceipt{
ExitCode: wmsg.Receipt.ExitCode,
Return: wmsg.Receipt.Return,
GasUsed: wmsg.Receipt.GasUsed,
},
TipSetTok: wmsg.TipSet.Bytes(),
Height: wmsg.Height,
}, nil
}
var _ CurrentDealInfoAPI = (*CurrentDealInfoAPIAdapter)(nil)
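// Illustrative wiring sketch (assumes only the types in this file plus some
// node implementation of CurrentDealInfoTskAPI): the adapter converts the
// TipSetKey-based node calls into the token-based API the manager expects.
func exampleCurrentDealInfo(ctx context.Context, node CurrentDealInfoTskAPI, tok TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (CurrentDealInfo, error) {
	mgr := CurrentDealInfoManager{CDAPI: &CurrentDealInfoAPIAdapter{CurrentDealInfoTskAPI: node}}
	// The deal ID is re-resolved from the publish message's return value, so the
	// result stays correct even if a reorg caused different IDs to be assigned.
	return mgr.GetCurrentDealInfo(ctx, tok, proposal, publishCid)
}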


@ -0,0 +1,310 @@
package sealing
import (
"bytes"
"errors"
"math/rand"
"sort"
"testing"
"time"
"golang.org/x/net/context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
evtmock "github.com/filecoin-project/lotus/chain/events/state/mock"
"github.com/filecoin-project/lotus/chain/types"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
)
var errNotFound = errors.New("Could not find")
func TestGetCurrentDealInfo(t *testing.T) {
ctx := context.Background()
dummyCid, _ := cid.Parse("bafkqaaa")
dummyCid2, _ := cid.Parse("bafkqaab")
zeroDealID := abi.DealID(0)
earlierDealID := abi.DealID(9)
successDealID := abi.DealID(10)
proposal := market.DealProposal{
PieceCID: dummyCid,
PieceSize: abi.PaddedPieceSize(100),
Client: tutils.NewActorAddr(t, "client"),
Provider: tutils.NewActorAddr(t, "provider"),
StoragePricePerEpoch: abi.NewTokenAmount(1),
ProviderCollateral: abi.NewTokenAmount(1),
ClientCollateral: abi.NewTokenAmount(1),
Label: "success",
}
otherProposal := market.DealProposal{
PieceCID: dummyCid2,
PieceSize: abi.PaddedPieceSize(100),
Client: tutils.NewActorAddr(t, "client"),
Provider: tutils.NewActorAddr(t, "provider"),
StoragePricePerEpoch: abi.NewTokenAmount(1),
ProviderCollateral: abi.NewTokenAmount(1),
ClientCollateral: abi.NewTokenAmount(1),
Label: "other",
}
successDeal := &api.MarketDeal{
Proposal: proposal,
State: market.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
},
}
earlierDeal := &api.MarketDeal{
Proposal: otherProposal,
State: market.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
},
}
type testCaseData struct {
searchMessageLookup *MsgLookup
searchMessageErr error
marketDeals map[abi.DealID]*api.MarketDeal
publishCid cid.Cid
targetProposal *market.DealProposal
expectedDealID abi.DealID
expectedMarketDeal *api.MarketDeal
expectedError error
}
testCases := map[string]testCaseData{
"deal lookup succeeds": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
successDealID: successDeal,
},
targetProposal: &proposal,
expectedDealID: successDealID,
expectedMarketDeal: successDeal,
},
"deal lookup succeeds two return values": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
earlierDealID: earlierDeal,
successDealID: successDeal,
},
targetProposal: &proposal,
expectedDealID: successDealID,
expectedMarketDeal: successDeal,
},
"deal lookup fails proposal mis-match": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
earlierDealID: earlierDeal,
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("could not find deal in publish deals message %s", dummyCid),
},
"deal lookup fails mismatch count of deals and return values": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
earlierDealID: earlierDeal,
successDealID: successDeal,
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("deal index 1 out of bounds of deals (len 1) in publish deals message %s", dummyCid),
},
"deal lookup succeeds, target proposal nil, single deal in message": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
successDealID: successDeal,
},
targetProposal: nil,
expectedDealID: successDealID,
expectedMarketDeal: successDeal,
},
"deal lookup fails, multiple deals in return value but target proposal nil": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
earlierDealID: earlierDeal,
successDealID: successDeal,
},
targetProposal: nil,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("getting deal ID from publish deal message %s: no deal proposal supplied but message return value has more than one deal (2 deals)", dummyCid),
},
"search message fails": {
publishCid: dummyCid,
searchMessageErr: errors.New("something went wrong"),
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: something went wrong", dummyCid),
},
"return code not ok": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.ErrIllegalState,
},
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", dummyCid, exitcode.ErrIllegalState),
},
"unable to unmarshal params": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: []byte("applesauce"),
},
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: unmarshalling message return: cbor input should be of type array", dummyCid),
},
}
runTestCase := func(testCase string, data testCaseData) {
t.Run(testCase, func(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
ts, err := evtmock.MockTipset(address.TestAddress, rand.Uint64())
require.NoError(t, err)
marketDeals := make(map[marketDealKey]*api.MarketDeal)
for dealID, deal := range data.marketDeals {
marketDeals[marketDealKey{dealID, ts.Key()}] = deal
}
mockApi := &CurrentDealInfoMockAPI{
SearchMessageLookup: data.searchMessageLookup,
SearchMessageErr: data.searchMessageErr,
MarketDeals: marketDeals,
}
dealInfoMgr := CurrentDealInfoManager{mockApi}
res, err := dealInfoMgr.GetCurrentDealInfo(ctx, ts.Key().Bytes(), data.targetProposal, data.publishCid)
require.Equal(t, data.expectedDealID, res.DealID)
require.Equal(t, data.expectedMarketDeal, res.MarketDeal)
if data.expectedError == nil {
require.NoError(t, err)
} else {
require.EqualError(t, err, data.expectedError.Error())
}
})
}
for testCase, data := range testCases {
runTestCase(testCase, data)
}
}
type marketDealKey struct {
abi.DealID
types.TipSetKey
}
type CurrentDealInfoMockAPI struct {
SearchMessageLookup *MsgLookup
SearchMessageErr error
MarketDeals map[marketDealKey]*api.MarketDeal
}
func (mapi *CurrentDealInfoMockAPI) ChainGetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) {
var dealIDs []abi.DealID
var deals []market2.ClientDealProposal
for k, dl := range mapi.MarketDeals {
dealIDs = append(dealIDs, k.DealID)
deals = append(deals, market2.ClientDealProposal{
Proposal: market2.DealProposal(dl.Proposal),
ClientSignature: crypto.Signature{
Data: []byte("foo bar cat dog"),
Type: crypto.SigTypeBLS,
},
})
}
sort.SliceStable(deals, func(i, j int) bool {
return dealIDs[i] < dealIDs[j]
})
buf := new(bytes.Buffer)
params := market2.PublishStorageDealsParams{Deals: deals}
err := params.MarshalCBOR(buf)
if err != nil {
panic(err)
}
return &types.Message{
Params: buf.Bytes(),
}, nil
}
func (mapi *CurrentDealInfoMockAPI) StateLookupID(ctx context.Context, addr address.Address, token TipSetToken) (address.Address, error) {
return addr, nil
}
func (mapi *CurrentDealInfoMockAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok TipSetToken) (*api.MarketDeal, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, err
}
deal, ok := mapi.MarketDeals[marketDealKey{dealID, tsk}]
if !ok {
return nil, errNotFound
}
return deal, nil
}
func (mapi *CurrentDealInfoMockAPI) StateSearchMsg(ctx context.Context, c cid.Cid) (*MsgLookup, error) {
if mapi.SearchMessageLookup == nil {
return mapi.SearchMessageLookup, mapi.SearchMessageErr
}
return mapi.SearchMessageLookup, mapi.SearchMessageErr
}
func makePublishDealsReturnBytes(t *testing.T, dealIDs []abi.DealID) []byte {
buf := new(bytes.Buffer)
dealsReturn := market.PublishStorageDealsReturn{
IDs: dealIDs,
}
err := dealsReturn.MarshalCBOR(buf)
require.NoError(t, err)
return buf.Bytes()
}


@ -15,4 +15,6 @@ type Config struct {
MaxSealingSectorsForDeals uint64
WaitDealsDelay time.Duration
AlwaysKeepUnsealedCopy bool
}


@ -27,6 +27,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)
@ -53,18 +54,21 @@ type SealingAPI interface {
StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorOnChainInfo, error)
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error)
StateLookupID(context.Context, address.Address, TipSetToken) (address.Address, error)
StateMinerSectorSize(context.Context, address.Address, TipSetToken) (abi.SectorSize, error)
StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error)
StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error)
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
@ -100,6 +104,7 @@ type Sealing struct {
terminator *TerminateBatcher
getConfig GetSealingConfigFunc
dealInfo *CurrentDealInfoManager
}
type FeeConfig struct {
@ -145,6 +150,7 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc),
getConfig: gc,
dealInfo: &CurrentDealInfoManager{api},
stats: SectorStats{
bySector: map[abi.SectorID]statSectorState{},


@ -1,7 +1,6 @@
package sealing
import (
"bytes"
"time"
"golang.org/x/xerrors"
@ -365,7 +364,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn
continue
}
proposal, err := m.api.StateMarketStorageDeal(ctx.Context(), p.DealInfo.DealID, tok)
proposal, err := m.api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok)
if err != nil {
log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
toFix = append(toFix, i)
@ -408,26 +407,17 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn
return ctx.Send(SectorRemove{})
}
ml, err := m.api.StateSearchMsg(ctx.Context(), *p.DealInfo.PublishCid)
var dp *market.DealProposal
if p.DealInfo.DealProposal != nil {
mdp := market.DealProposal(*p.DealInfo.DealProposal)
dp = &mdp
}
res, err := m.dealInfo.GetCurrentDealInfo(ctx.Context(), tok, dp, *p.DealInfo.PublishCid)
if err != nil {
return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): %w", *p.DealInfo.PublishCid, sector.SectorNumber, i, err)
return xerrors.Errorf("recovering deal ID for publish deal message %s (sector %d, piece %d): %w", *p.DealInfo.PublishCid, sector.SectorNumber, i, err)
}
if ml.Receipt.ExitCode != exitcode.Ok {
return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): non-ok exit code: %s", *p.DealInfo.PublishCid, sector.SectorNumber, i, ml.Receipt.ExitCode)
}
var retval market.PublishStorageDealsReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(ml.Receipt.Return)); err != nil {
return xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err)
}
if len(retval.IDs) != 1 {
// market currently only ever sends messages with 1 deal
return xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal")
}
updates[i] = retval.IDs[0]
updates[i] = res.DealID
}
// Not much to do here, we can't go back in time to commit this sector


@ -512,7 +512,12 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
// TODO: Maybe wait for some finality
if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting sealing config: %w", err)
}
if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false, cfg.AlwaysKeepUnsealedCopy)); err != nil {
return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
}
@ -523,7 +528,12 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf
// TODO: track sector health / expiration
log.Infof("Proving sector %d", sector.SectorNumber)
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting sealing config: %w", err)
}
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil {
log.Error(err)
}


@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
)
// Piece is a tuple of piece and deal info
@ -32,6 +33,7 @@ type Piece struct {
type DealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}
@ -163,7 +165,7 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context {
// Returns list of offset/length tuples of sector data ranges which clients
// requested to keep unsealed
func (t *SectorInfo) keepUnsealedRanges(invert bool) []storage.Range {
func (t *SectorInfo) keepUnsealedRanges(invert, alwaysKeep bool) []storage.Range {
var out []storage.Range
var at abi.UnpaddedPieceSize
@ -174,7 +176,10 @@ func (t *SectorInfo) keepUnsealedRanges(invert bool) []storage.Range {
if piece.DealInfo == nil {
continue
}
if piece.DealInfo.KeepUnsealed == invert {
keep := piece.DealInfo.KeepUnsealed || alwaysKeep
if keep == invert {
continue
}


@ -4,26 +4,41 @@ import (
"bytes"
"testing"
"github.com/ipfs/go-cid"
"gotest.tools/assert"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/abi"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
)
func TestSectorInfoSelialization(t *testing.T) {
func TestSectorInfoSerialization(t *testing.T) {
d := abi.DealID(1234)
dummyCid, err := cid.Parse("bafkqaaa")
if err != nil {
t.Fatal(err)
}
dealInfo := DealInfo{
DealID: d,
DealSchedule: DealSchedule{
StartEpoch: 0,
EndEpoch: 100,
},
DealProposal: &market2.DealProposal{
PieceCID: dummyCid,
PieceSize: 5,
Client: tutils.NewActorAddr(t, "client"),
Provider: tutils.NewActorAddr(t, "provider"),
StoragePricePerEpoch: abi.NewTokenAmount(10),
ProviderCollateral: abi.NewTokenAmount(20),
ClientCollateral: abi.NewTokenAmount(15),
},
}
dummyCid := builtin2.AccountActorCodeID
si := &SectorInfo{
State: "stateful",
SectorNumber: 234,
@ -53,18 +68,18 @@ func TestSectorInfoSelialization(t *testing.T) {
}
var si2 SectorInfo
if err := cborutil.ReadCborRPC(bytes.NewReader(b), &si); err != nil {
if err := cborutil.ReadCborRPC(bytes.NewReader(b), &si2); err != nil {
t.Fatal(err)
return
}
assert.Equal(t, si.State, si2.State)
assert.Equal(t, si.SectorNumber, si2.SectorNumber)
assert.Equal(t, si.Pieces, si2.Pieces)
assert.Equal(t, si.CommD, si2.CommD)
assert.Equal(t, si.TicketValue, si2.TicketValue)
assert.Equal(t, si.Pieces[0].DealInfo.DealID, si2.Pieces[0].DealInfo.DealID)
assert.Equal(t, si.Pieces[0].DealInfo.DealProposal.PieceCID, si2.Pieces[0].DealInfo.DealProposal.PieceCID)
assert.Equal(t, *si.CommD, *si2.CommD)
assert.DeepEqual(t, si.TicketValue, si2.TicketValue)
assert.Equal(t, si.TicketEpoch, si2.TicketEpoch)
assert.Equal(t, si.TicketEpoch, si2.TicketEpoch)
assert.Equal(t, si, si2)
}

go.mod

@ -27,24 +27,24 @@ require (
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f
github.com/filecoin-project/go-address v0.0.5
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
github.com/filecoin-project/go-bitfield v0.2.3
github.com/filecoin-project/go-bitfield v0.2.4
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
github.com/filecoin-project/go-data-transfer v1.2.7
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
github.com/filecoin-project/go-fil-markets v1.1.2
github.com/filecoin-project/go-jsonrpc v0.1.2
github.com/filecoin-project/go-fil-markets v1.1.9
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71
github.com/filecoin-project/go-state-types v0.1.0
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
github.com/filecoin-project/specs-actors v0.9.13
github.com/filecoin-project/specs-actors/v2 v2.3.4
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa
github.com/filecoin-project/specs-actors/v3 v3.0.3
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
@ -60,7 +60,7 @@ require (
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/ipfs/bbloom v0.0.4
github.com/ipfs/go-bitswap v0.3.2
github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-block-format v0.0.3
github.com/ipfs/go-blockservice v0.1.4
github.com/ipfs/go-cid v0.0.7
github.com/ipfs/go-cidutil v0.0.2
@ -121,6 +121,7 @@ require (
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.14
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/opentracing/opentracing-go v1.2.0
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a
github.com/prometheus/client_golang v1.6.0
@ -130,10 +131,10 @@ require (
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.2.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2
github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.opencensus.io v0.22.5
go.uber.org/dig v1.10.0 // indirect
@ -145,9 +146,9 @@ require (
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
)
replace github.com/filecoin-project/lotus => ./

go.sum

@ -61,11 +61,9 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@ -93,7 +91,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0=
github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
@ -138,7 +135,6 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b h1:OKALTB609+19AM7wsO0k8yMwAqjEIppcnYvyIhA+ZlQ=
github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ=
github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY=
github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ=
@ -200,7 +196,6 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/drand/bls12-381 v0.3.2 h1:RImU8Wckmx8XQx1tp1q04OV73J9Tj6mmpQLYDP7V1XE=
github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y=
github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI=
github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g=
@ -233,52 +228,47 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A=
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8=
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM=
github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc=
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q=
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.3 h1:pedK/7maYF06Z+BYJf2OeFFqIDEh6SP6mIOlLFpYXGs=
github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk=
github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 h1:0kHszkYP3hgApcjl5x4rpwONhN9+j7XDobf6at5XfHs=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno=
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
github.com/filecoin-project/go-data-transfer v1.2.7 h1:WE5Cpp9eMt5BDoWOVR64QegSn6bwHQaDzyyjVU377Y0=
github.com/filecoin-project/go-data-transfer v1.2.7/go.mod h1:mvjZ+C3NkBX10JP4JMu27DCjUouHFjHwUGh+Xc4yvDA=
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.1.2 h1:5FVdDmF9GvW6Xllql9OGiJXEZjh/tu590BXSQH2W/vU=
github.com/filecoin-project/go-fil-markets v1.1.2/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ=
github.com/filecoin-project/go-fil-markets v1.1.9 h1:sA0NIEOpy7brZaeXeNgdXg5pvHaBtD5OTRlraOUbI0w=
github.com/filecoin-project/go-fil-markets v1.1.9/go.mod h1:0yQu5gvrjFoAIyzPSSJ+xUdCG83vjInAFbTswIB5/hk=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI=
github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg=
github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI=
github.com/filecoin-project/go-jsonrpc v0.1.2 h1:MTebUawBHLxxY9gDi1WXuGc89TWIDmsgoDqeZSk9KRw=
github.com/filecoin-project/go-jsonrpc v0.1.2/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM=
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI=
github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg=
@ -288,10 +278,9 @@ github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/g
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71 h1:Cas/CUB4ybYpdxvW7LouaydE16cpwdq3vvS3qgZuU+Q=
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0 h1:9r2HCSMMCmyMfGyMKxQtv0GKp6VT/m5GgVk8EhYbLJU=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
@ -303,12 +292,11 @@ github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK
github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
github.com/filecoin-project/specs-actors/v2 v2.3.2 h1:2Vcf4CGa29kRh4JJ02m+FbvD/p3YNnLGsaHfw7Uj49g=
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
github.com/filecoin-project/specs-actors/v2 v2.3.4 h1:NZK2oMCcA71wNsUzDBmLQyRMzcCnX9tDGvwZ53G67j8=
github.com/filecoin-project/specs-actors/v2 v2.3.4/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa h1:J0yyTt9MLDaN0XvzjEAWTCvG6SRVfXc6dVLluvRiOsQ=
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa/go.mod h1:NL24TPjJGyU7fh1ztpUyYcoZi3TmRKNEI0huPYmhObA=
github.com/filecoin-project/specs-actors/v3 v3.0.3 h1:bq9B1Jnq+Z0A+Yj3KnYhN3kcTpUyP6Umo3MZgai0BRE=
github.com/filecoin-project/specs-actors/v3 v3.0.3/go.mod h1:oMcmEed6B7H/wHabM3RQphTIhq0ibAKsbpYs+bQ/uxQ=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
@ -403,7 +391,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -512,8 +499,9 @@ github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMi
github.com/ipfs/go-bitswap v0.3.2 h1:TdKx7lpidYe2dMAKfdeNS26y6Pc/AZX/i8doI1GV210=
github.com/ipfs/go-bitswap v0.3.2/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w=
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI=
github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So=
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
@ -574,7 +562,6 @@ github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZ
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
github.com/ipfs/go-graphsync v0.5.2 h1:USD+daaSC+7pLHCxROThSaF6SF7WYXF03sjrta0rCfA=
github.com/ipfs/go-graphsync v0.5.2/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
@ -752,14 +739,12 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -1226,6 +1211,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
@ -1381,7 +1368,6 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@ -1426,7 +1412,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@ -1445,7 +1430,6 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@ -1480,10 +1464,10 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:f
github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU=
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2 h1:7HzUKl5d/dELS9lLeT4W6YvliZx+s9k/eOOIdHKrA/w=
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 h1:bsUlNhdmbtlfdLVXAVfuvKQ01RnWAM09TVrJkI7NZs4=
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
@ -1503,8 +1487,8 @@ github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d h1:wnjWu1N8UTNf2zzF5FWlEyNNbNw5GMVHaHaaLdvdTdA=
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A=
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 h1:++Zf4xQ7YrkE81gNHIjVqx5JZsn0nbMeHOkY1ILAIME=
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
@ -1545,7 +1529,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
@ -1553,7 +1536,6 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@ -1566,11 +1548,9 @@ go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
@ -1706,7 +1686,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1826,7 +1805,6 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -1910,12 +1888,12 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
@ -1952,12 +1930,9 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM=
modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=

View File

@ -202,5 +202,109 @@
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
],
"fil/3/account": [
"Send",
"Constructor",
"PubkeyAddress"
],
"fil/3/cron": [
"Send",
"Constructor",
"EpochTick"
],
"fil/3/init": [
"Send",
"Constructor",
"Exec"
],
"fil/3/multisig": [
"Send",
"Constructor",
"Propose",
"Approve",
"Cancel",
"AddSigner",
"RemoveSigner",
"SwapSigner",
"ChangeNumApprovalsThreshold",
"LockBalance"
],
"fil/3/paymentchannel": [
"Send",
"Constructor",
"UpdateChannelState",
"Settle",
"Collect"
],
"fil/3/reward": [
"Send",
"Constructor",
"AwardBlockReward",
"ThisEpochReward",
"UpdateNetworkKPI"
],
"fil/3/storagemarket": [
"Send",
"Constructor",
"AddBalance",
"WithdrawBalance",
"PublishStorageDeals",
"VerifyDealsForActivation",
"ActivateDeals",
"OnMinerSectorsTerminate",
"ComputeDataCommitment",
"CronTick"
],
"fil/3/storageminer": [
"Send",
"Constructor",
"ControlAddresses",
"ChangeWorkerAddress",
"ChangePeerID",
"SubmitWindowedPoSt",
"PreCommitSector",
"ProveCommitSector",
"ExtendSectorExpiration",
"TerminateSectors",
"DeclareFaults",
"DeclareFaultsRecovered",
"OnDeferredCronEvent",
"CheckSectorProven",
"ApplyRewards",
"ReportConsensusFault",
"WithdrawBalance",
"ConfirmSectorProofsValid",
"ChangeMultiaddrs",
"CompactPartitions",
"CompactSectorNumbers",
"ConfirmUpdateWorkerKey",
"RepayDebt",
"ChangeOwnerAddress",
"DisputeWindowedPoSt"
],
"fil/3/storagepower": [
"Send",
"Constructor",
"CreateMiner",
"UpdateClaimedPower",
"EnrollCronEvent",
"OnEpochTickEnd",
"UpdatePledgeTotal",
"SubmitPoRepForBulkVerify",
"CurrentTotalPower"
],
"fil/3/system": [
"Send",
"Constructor"
],
"fil/3/verifiedregistry": [
"Send",
"Constructor",
"AddVerifier",
"RemoveVerifier",
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
]
}
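The lists above enumerate the exported methods of each v3 built-in actor. By convention, Send is the implicit method 0 and Constructor is method 1, so a name's position in each list corresponds to its on-chain method number. As a minimal sketch (assuming the v3 specs-actors module is available), the numeric ordinal for one of these names can be read from the builtin method tables:

    package main

    import (
        "fmt"

        builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
    )

    func main() {
        // SubmitWindowedPoSt is the fifth entry after Send in fil/3/storageminer,
        // so this is expected to print 5.
        fmt.Println(builtin3.MethodsMiner.SubmitWindowedPoSt)
    }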

View File

@ -6,11 +6,12 @@ import (
"bytes"
"context"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/ipfs/go-cid"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
@ -31,15 +32,16 @@ import (
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/modules/helpers"
)
type ClientNodeAdapter struct {
*clientApi
*apiWrapper
fundmgr *market.FundManager
ev *events.Events
dsMatcher *dealStateMatcher
scMgr *SectorCommittedManager
}
type clientApi struct {
@ -48,16 +50,20 @@ type clientApi struct {
full.MpoolAPI
}
func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode {
func NewClientNodeAdapter(mctx helpers.MetricsCtx, lc fx.Lifecycle, stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode {
capi := &clientApi{chain, stateapi, mpool}
return &ClientNodeAdapter{
clientApi: capi,
apiWrapper: &apiWrapper{api: capi},
ctx := helpers.LifecycleCtx(mctx, lc)
ev := events.NewEvents(ctx, capi)
a := &ClientNodeAdapter{
clientApi: capi,
fundmgr: fundmgr,
ev: events.NewEvents(context.TODO(), capi),
ev: ev,
dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))),
}
a.scMgr = NewSectorCommittedManager(ev, a, &apiWrapper{api: capi})
return a
}
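// Note on the constructor above: the chain-events subscription is now created
// with a context bound to the fx lifecycle (helpers.LifecycleCtx(mctx, lc))
// rather than context.TODO(), so watchers are cancelled cleanly on node
// shutdown, and sector pre-commit/commit tracking is delegated to the
// SectorCommittedManager built from that same events instance.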
func (c *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) {
@ -135,6 +141,7 @@ func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address
// ValidatePublishedDeal validates that the provided deal has appeared on chain and references the same ClientDeal
// returns the Deal id if there is no error
// TODO: Don't return deal ID
func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) {
log.Infow("DEAL ACCEPTED!")
@ -216,14 +223,17 @@ func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, si
return big.Mul(bounds.Min, big.NewInt(clientOverestimation)), bounds.Max, nil
}
// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (c *ClientNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error {
return OnDealSectorPreCommitted(ctx, c, c.ev, provider, dealID, marketactor.DealProposal(proposal), publishCid, cb)
return c.scMgr.OnDealSectorPreCommitted(ctx, provider, marketactor.DealProposal(proposal), *publishCid, cb)
}
// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error {
return OnDealSectorCommitted(ctx, c, c.ev, provider, dealID, sectorNumber, marketactor.DealProposal(proposal), publishCid, cb)
return c.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, marketactor.DealProposal(proposal), *publishCid, cb)
}
// TODO: Replace dealID parameter with DealProposal
func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error {
head, err := c.ChainHead(ctx)
if err != nil {

View File

@ -0,0 +1,380 @@
package storageadapter
import (
"context"
"fmt"
"strings"
"sync"
"time"
"go.uber.org/fx"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
type dealPublisherAPI interface {
ChainHead(context.Context) (*types.TipSet, error)
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
}
// DealPublisher batches deal publishing so that many deals can be included in
// a single publish message. This saves gas for miners that publish deals
// frequently.
// When a deal is submitted, the DealPublisher waits a configurable amount of
// time for other deals to be submitted before sending the publish message.
// There is a configurable maximum number of deals that can be included in one
// message. When the limit is reached the DealPublisher immediately submits a
// publish message with all deals in the queue.
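//
// A minimal usage sketch (illustrative only; fullNodeAPI, ctx and proposal are
// assumed to exist in the caller):
//
//   dp := newDealPublisher(fullNodeAPI, PublishMsgConfig{
//       Period:         time.Minute,
//       MaxDealsPerMsg: 8,
//   }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(0)})
//   msgCid, err := dp.Publish(ctx, proposal) // blocks until the containing batch is published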
type DealPublisher struct {
api dealPublisherAPI
ctx context.Context
Shutdown context.CancelFunc
maxDealsPerPublishMsg uint64
publishPeriod time.Duration
publishSpec *api.MessageSendSpec
lk sync.Mutex
pending []*pendingDeal
cancelWaitForMoreDeals context.CancelFunc
publishPeriodStart time.Time
}
// A deal that is queued to be published
type pendingDeal struct {
ctx context.Context
deal market2.ClientDealProposal
Result chan publishResult
}
// The result of publishing a deal
type publishResult struct {
msgCid cid.Cid
err error
}
func newPendingDeal(ctx context.Context, deal market2.ClientDealProposal) *pendingDeal {
return &pendingDeal{
ctx: ctx,
deal: deal,
Result: make(chan publishResult),
}
}
type PublishMsgConfig struct {
// The amount of time to wait for more deals to arrive before
// publishing
Period time.Duration
// The maximum number of deals to include in a single PublishStorageDeals
// message
MaxDealsPerMsg uint64
}
func NewDealPublisher(
feeConfig *config.MinerFeeConfig,
publishMsgCfg PublishMsgConfig,
) func(lc fx.Lifecycle, full api.FullNode) *DealPublisher {
return func(lc fx.Lifecycle, full api.FullNode) *DealPublisher {
maxFee := abi.NewTokenAmount(0)
if feeConfig != nil {
maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee)
}
publishSpec := &api.MessageSendSpec{MaxFee: maxFee}
dp := newDealPublisher(full, publishMsgCfg, publishSpec)
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
dp.Shutdown()
return nil
},
})
return dp
}
}
func newDealPublisher(
dpapi dealPublisherAPI,
publishMsgCfg PublishMsgConfig,
publishSpec *api.MessageSendSpec,
) *DealPublisher {
ctx, cancel := context.WithCancel(context.Background())
return &DealPublisher{
api: dpapi,
ctx: ctx,
Shutdown: cancel,
maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg,
publishPeriod: publishMsgCfg.Period,
publishSpec: publishSpec,
}
}
// PendingDeals returns the list of deals that are queued up to be published
func (p *DealPublisher) PendingDeals() api.PendingDealInfo {
p.lk.Lock()
defer p.lk.Unlock()
// Filter out deals whose context has been cancelled
deals := make([]*pendingDeal, 0, len(p.pending))
for _, dl := range p.pending {
if dl.ctx.Err() == nil {
deals = append(deals, dl)
}
}
pending := make([]market2.ClientDealProposal, len(deals))
for i, deal := range deals {
pending[i] = deal.deal
}
return api.PendingDealInfo{
Deals: pending,
PublishPeriodStart: p.publishPeriodStart,
PublishPeriod: p.publishPeriod,
}
}
// ForcePublishPendingDeals publishes all pending deals without waiting for
// the publish period to elapse
func (p *DealPublisher) ForcePublishPendingDeals() {
p.lk.Lock()
defer p.lk.Unlock()
log.Infof("force publishing deals")
p.publishAllDeals()
}
func (p *DealPublisher) Publish(ctx context.Context, deal market2.ClientDealProposal) (cid.Cid, error) {
pdeal := newPendingDeal(ctx, deal)
// Add the deal to the queue
p.processNewDeal(pdeal)
// Wait for the deal to be submitted
select {
case <-ctx.Done():
return cid.Undef, ctx.Err()
case res := <-pdeal.Result:
return res.msgCid, res.err
}
}
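// Concurrency sketch (illustrative): Publish takes the publisher's lock when
// queueing, so it can be called from many goroutines; every deal queued within
// the same publish period is sent in one PublishStorageDeals message and each
// caller receives that message's CID.
//
//   var wg sync.WaitGroup
//   for _, d := range proposals { // proposals assumed to exist
//       d := d
//       wg.Add(1)
//       go func() {
//           defer wg.Done()
//           if msgCid, err := dp.Publish(ctx, d); err == nil {
//               log.Infof("deal published in message %s", msgCid)
//           }
//       }()
//   }
//   wg.Wait()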
func (p *DealPublisher) processNewDeal(pdeal *pendingDeal) {
p.lk.Lock()
defer p.lk.Unlock()
// Filter out any cancelled deals
p.filterCancelledDeals()
// If all deals have been cancelled, clear the wait-for-deals timer
if len(p.pending) == 0 && p.cancelWaitForMoreDeals != nil {
p.cancelWaitForMoreDeals()
p.cancelWaitForMoreDeals = nil
}
// Make sure the new deal hasn't been cancelled
if pdeal.ctx.Err() != nil {
return
}
// Add the new deal to the queue
p.pending = append(p.pending, pdeal)
log.Infof("add deal with piece CID %s to publish deals queue - %d deals in queue (max queue size %d)",
pdeal.deal.Proposal.PieceCID, len(p.pending), p.maxDealsPerPublishMsg)
// If the maximum number of deals per message has been reached,
// send a publish message
if uint64(len(p.pending)) >= p.maxDealsPerPublishMsg {
log.Infof("publish deals queue has reached max size of %d, publishing deals", p.maxDealsPerPublishMsg)
p.publishAllDeals()
return
}
// Otherwise wait for more deals to arrive or the timeout to be reached
p.waitForMoreDeals()
}
func (p *DealPublisher) waitForMoreDeals() {
// Check if we're already waiting for deals
if !p.publishPeriodStart.IsZero() {
elapsed := time.Since(p.publishPeriodStart)
log.Infof("%s elapsed of / %s until publish deals queue is published",
elapsed, p.publishPeriod)
return
}
// Set a timeout to wait for more deals to arrive
log.Infof("waiting publish deals queue period of %s before publishing", p.publishPeriod)
ctx, cancel := context.WithCancel(p.ctx)
p.publishPeriodStart = time.Now()
p.cancelWaitForMoreDeals = cancel
go func() {
timer := time.NewTimer(p.publishPeriod)
select {
case <-ctx.Done():
timer.Stop()
case <-timer.C:
p.lk.Lock()
defer p.lk.Unlock()
// The timeout has expired so publish all pending deals
log.Infof("publish deals queue period of %s has expired, publishing deals", p.publishPeriod)
p.publishAllDeals()
}
}()
}
func (p *DealPublisher) publishAllDeals() {
// If the timeout hasn't yet been cancelled, cancel it
if p.cancelWaitForMoreDeals != nil {
p.cancelWaitForMoreDeals()
p.cancelWaitForMoreDeals = nil
p.publishPeriodStart = time.Time{}
}
// Filter out any deals that have been cancelled
p.filterCancelledDeals()
deals := p.pending[:]
p.pending = nil
// Send the publish message
go p.publishReady(deals)
}
func (p *DealPublisher) publishReady(ready []*pendingDeal) {
if len(ready) == 0 {
return
}
// onComplete is called when the publish message has been sent or there
// was an error
onComplete := func(pd *pendingDeal, msgCid cid.Cid, err error) {
// Send the publish result on the pending deal's Result channel
res := publishResult{
msgCid: msgCid,
err: err,
}
select {
case <-p.ctx.Done():
case <-pd.ctx.Done():
case pd.Result <- res:
}
}
// Validate each deal to make sure it can be published
validated := make([]*pendingDeal, 0, len(ready))
deals := make([]market2.ClientDealProposal, 0, len(ready))
for _, pd := range ready {
// Validate the deal
if err := p.validateDeal(pd.deal); err != nil {
// Validation failed, complete immediately with an error
go onComplete(pd, cid.Undef, err)
continue
}
validated = append(validated, pd)
deals = append(deals, pd.deal)
}
// Send the publish message
msgCid, err := p.publishDealProposals(deals)
// Signal that each deal has been published
for _, pd := range validated {
go onComplete(pd, msgCid, err)
}
}
// validateDeal checks that the deal proposal start epoch hasn't already
// elapsed
func (p *DealPublisher) validateDeal(deal market2.ClientDealProposal) error {
head, err := p.api.ChainHead(p.ctx)
if err != nil {
return err
}
if head.Height() > deal.Proposal.StartEpoch {
return xerrors.Errorf(
"cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d",
deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch)
}
return nil
}
// Sends the publish message
func (p *DealPublisher) publishDealProposals(deals []market2.ClientDealProposal) (cid.Cid, error) {
if len(deals) == 0 {
return cid.Undef, nil
}
log.Infof("publishing %d deals in publish deals queue with piece CIDs: %s", len(deals), pieceCids(deals))
provider := deals[0].Proposal.Provider
for _, dl := range deals {
if dl.Proposal.Provider != provider {
msg := fmt.Sprintf("publishing %d deals failed: ", len(deals)) +
"not all deals are for same provider: " +
fmt.Sprintf("deal with piece CID %s is for provider %s ", deals[0].Proposal.PieceCID, deals[0].Proposal.Provider) +
fmt.Sprintf("but deal with piece CID %s is for provider %s", dl.Proposal.PieceCID, dl.Proposal.Provider)
return cid.Undef, xerrors.Errorf(msg)
}
}
mi, err := p.api.StateMinerInfo(p.ctx, provider, types.EmptyTSK)
if err != nil {
return cid.Undef, err
}
params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{
Deals: deals,
})
if err != nil {
return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err)
}
smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{
To: market.Address,
From: mi.Worker,
Value: types.NewInt(0),
Method: market.Methods.PublishStorageDeals,
Params: params,
}, p.publishSpec)
if err != nil {
return cid.Undef, err
}
return smsg.Cid(), nil
}
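// Note: the message above is sent from the provider's worker address to the
// built-in storage market actor (market.Address), invoking its
// PublishStorageDeals method with the batched proposals as parameters.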
func pieceCids(deals []market2.ClientDealProposal) string {
cids := make([]string, 0, len(deals))
for _, dl := range deals {
cids = append(cids, dl.Proposal.PieceCID.String())
}
return strings.Join(cids, ", ")
}
// filter out deals that have been cancelled
func (p *DealPublisher) filterCancelledDeals() {
i := 0
for _, pd := range p.pending {
if pd.ctx.Err() == nil {
p.pending[i] = pd
i++
}
}
p.pending = p.pending[:i]
}

Some files were not shown because too many files have changed in this diff.