Merge pull request #2 from RobQuistNL/patch-2

Update makefile
Rob Quist 2020-06-26 02:10:53 +02:00 committed by GitHub
commit 3d3e384d5c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
73 changed files with 1991 additions and 544 deletions


@@ -314,7 +314,7 @@ workflows:
   ci:
     jobs:
       - lint-changes:
-          args: "--new-from-rev origin/next"
+          args: "--new-from-rev origin/master"
       - mod-tidy-check
       - gofmt
      - test:


@@ -6,7 +6,7 @@ all: build
 unexport GOFLAGS

 GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 13), 1)
+ifeq ($(shell expr $(GOVERSION) \< 14), 1)
 $(warning Your Golang version is go 1.$(GOVERSION))
 $(error Update Golang to version $(shell grep '^go' go.mod))
 endif
@@ -105,15 +105,17 @@ install:
 install-services: install
     mkdir -p /usr/local/lib/systemd/system
+    mkdir -p /var/log/lotus
     install -C -m 0644 ./scripts/lotus-daemon.service /usr/local/lib/systemd/system/lotus-daemon.service
     install -C -m 0644 ./scripts/lotus-miner.service /usr/local/lib/systemd/system/lotus-miner.service
     systemctl daemon-reload
     @echo
-    @echo "lotus and lotus-miner services installed. Don't forget to 'systemctl enable lotus|lotus-miner' for it to be enabled on startup."
+    @echo "lotus-daemon and lotus-miner services installed. Don't forget to 'systemctl enable lotus-daemon|lotus-miner' for it to be enabled on startup."

 clean-services:
     rm -f /usr/local/lib/systemd/system/lotus-daemon.service
     rm -f /usr/local/lib/systemd/system/lotus-miner.service
+    rm -f /usr/local/lib/systemd/system/chainwatch.service
     systemctl daemon-reload

 # TOOLS
@@ -160,6 +162,13 @@ chainwatch:
 .PHONY: chainwatch
 BINS+=chainwatch

+install-chainwatch-service: chainwatch
+    install -C ./chainwatch /usr/local/bin/chainwatch
+    install -C -m 0644 ./scripts/chainwatch.service /usr/local/lib/systemd/system/chainwatch.service
+    systemctl daemon-reload
+    @echo
+    @echo "chainwatch installed. Don't forget to 'systemctl enable chainwatch' for it to be enabled on startup."
+
 bench:
     rm -f bench
     go build -o bench ./cmd/lotus-bench


@@ -8,6 +8,10 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
 For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).

+## Reporting a Vulnerability
+
+Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
+
 ## Development

 All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).

SECURITY.md (new file, 29 lines)

@@ -0,0 +1,29 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+For *critical* bugs, please send an email to security@filecoin.org.
+
+The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about it. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
+
+Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
+
+Here are some examples of bugs we would consider 'critical':
+
+* If you can spend from a `multisig` wallet you do not control the keys for.
+* If you can cause a miner to be slashed without them actually misbehaving.
+* If you can maintain power without submitting windowed posts regularly.
+* If you can craft a message that causes lotus nodes to panic.
+* If you can cause your miner to win significantly more blocks than it should.
+* If you can craft a message that causes a persistent fork in the network.
+* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
+
+This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
+
+## Supported Versions
+
+* TODO: This should be defined and set up by Mainnet launch.
+
+| Version | Supported          |
+| ------- | ------------------ |
+| Testnet | :white_check_mark: |


@@ -13,11 +13,13 @@ import (
 )

 type Common interface {
-    // Auth
+    // MethodGroup: Auth

     AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
     AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)

-    // network
+    // MethodGroup: Net

     NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
     NetPeers(context.Context) ([]peer.AddrInfo, error)
@@ -27,6 +29,8 @@ type Common interface {
     NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
     NetPubsubScores(context.Context) ([]PubsubScore, error)

+    // MethodGroup: Common
+
     // ID returns peerID of libp2p node backing this API
     ID(context.Context) (peer.ID, error)
@@ -38,6 +42,8 @@ type Common interface {
     // trigger graceful shutdown
     Shutdown(context.Context) error

+    Closing(context.Context) (<-chan struct{}, error)
+
 }

 // Version provides various build-time information


@@ -35,26 +35,71 @@ type FullNode interface {

     // ChainNotify returns channel with chain head updates
     // First message is guaranteed to be of len == 1, and type == 'current'
     ChainNotify(context.Context) (<-chan []*HeadChange, error)

     // ChainHead returns the current head of the chain
     ChainHead(context.Context) (*types.TipSet, error)

     // ChainGetRandomness is used to sample the chain for randomness
     ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)

     // ChainGetBlock returns the block specified by the given CID
     ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
     ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)

-    ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
-    ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
-    ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
+    // ChainGetBlockMessages returns messages stored in the specified block
+    ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
+
+    // ChainGetParentReceipts returns receipts for messages in parent tipset of
+    // the specified block
+    ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
+
+    // ChainGetParentReceipts returns messages stored in parent tipset of the
+    // specified block
+    ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
+
+    // ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
+    // If there are no blocks at the specified epoch, a tipset at higher epoch
+    // will be returned
     ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+
+    // ChainReadObj reads ipld nodes referenced by the specified CID from chain
+    // blockstore and returns raw bytes
     ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+
+    // ChainHasObj checks if a given CID exists in the chain blockstore
     ChainHasObj(context.Context, cid.Cid) (bool, error)
     ChainStatObj(context.Context, cid.Cid, cid.Cid) (ObjStat, error)
+
+    // ChainSetHead forcefully sets current chain head. Use with caution
     ChainSetHead(context.Context, types.TipSetKey) error
+
+    // ChainGetGenesis returns the genesis tipset
     ChainGetGenesis(context.Context) (*types.TipSet, error)
+
+    // ChainTipSetWeight computes weight for the specified tipset
     ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
     ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
+
+    // ChainGetMessage reads a message referenced by the specified CID from the
+    // chain blockstore
     ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
+
+    // ChainGetPath returns a set of revert/apply operations needed to get from
+    // one tipset to another, for example:
+    // ```
+    //        to
+    //         ^
+    // from   tAA
+    //   ^     ^
+    // tBA    tAB
+    //  ^---*--^
+    //      ^
+    //     tRR
+    // ```
+    // Would return `[revert(tBA), apply(tAB), apply(tAA)]`
     ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
+
+    // ChainExport returns a stream of bytes with CAR dump of chain data
     ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error)

     // MethodGroup: Sync
@@ -63,23 +108,45 @@ type FullNode interface {
     // SyncState returns the current status of the lotus sync system
     SyncState(context.Context) (*SyncState, error)

     // SyncSubmitBlock can be used to submit a newly created block to the
     // network through this node
     SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error

+    // SyncIncomingBlocks returns a channel streaming incoming, potentially not
+    // yet synced block headers.
     SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)

+    // SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced.
+    // Use with extreme caution
     SyncMarkBad(ctx context.Context, bcid cid.Cid) error

+    // SyncCheckBad checks if a block was marked as bad, and if it was, returns
+    // the reason
     SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)

     // MethodGroup: Mpool
     // The Mpool methods are for interacting with the message pool. The message pool
     // manages all incoming and outgoing 'messages' going over the network.

+    // MpoolPending returns pending mempool messages
     MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
+
+    // MpoolPush pushes a signed message to mempool
     MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)

-    MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // get nonce, sign, push
+    // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
+    // to mempool
+    MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error)
+
+    // MpoolGetNonce gets next nonce for the specified sender.
+    // Note that this method may not be atomic. Use MpoolPushMessage instead
     MpoolGetNonce(context.Context, address.Address) (uint64, error)
     MpoolSub(context.Context) (<-chan MpoolUpdate, error)

-    MpoolEstimateGasPrice(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
+    // MpoolEstimateGasPrice estimates what gas price should be used for a
+    // message to have high likelihood of inclusion in `nblocksincl` epochs
+    MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)

     // MethodGroup: Miner
@@ -118,6 +185,7 @@ type FullNode interface {
     ClientListDeals(ctx context.Context) ([]DealInfo, error)
     ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
     ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error)
+    ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (QueryOffer, error)
     ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
     ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
     ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*CommPRet, error)
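As a rough illustration of how the newly documented chain-inspection methods fit together, here is a minimal sketch against the `api.FullNode` interface. The helper and its wiring are hypothetical; only interface methods shown in this diff (and well-known `TipSet`/`BlockHeader` accessors) are assumed.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printHead is a hypothetical helper: given any api.FullNode implementation
// (embedded node, JSON-RPC client, test harness), it inspects the chain using
// only methods whose documentation is added in this diff.
func printHead(ctx context.Context, node api.FullNode) error {
	head, err := node.ChainHead(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("head at epoch %d with %d blocks\n", head.Height(), len(head.Blocks()))

	// ChainGetBlockMessages returns the messages stored in a specific block.
	blk := head.Blocks()[0]
	msgs, err := node.ChainGetBlockMessages(ctx, blk.Cid())
	if err != nil {
		return err
	}
	fmt.Printf("block %s carries %d BLS and %d secp messages\n",
		blk.Cid(), len(msgs.BlsMessages), len(msgs.SecpkMessages))
	return nil
}
```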


@@ -36,6 +36,7 @@ type StorageMiner interface {
     SectorsRefs(context.Context) (map[string][]SealedRef, error)

     SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
+    SectorRemove(context.Context, abi.SectorNumber) error

     StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
     StorageLocal(ctx context.Context) (map[stores.ID]string, error)
@@ -50,11 +51,15 @@ type StorageMiner interface {
     MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
     MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
     MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
-    MarketSetPrice(context.Context, types.BigInt) error
+    MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
+    MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)

     DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
     DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
     DealsSetAcceptingStorageDeals(context.Context, bool) error
+    DealsSetAcceptingRetrievalDeals(context.Context, bool) error
+    DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
+    DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error

     StorageAddLocal(ctx context.Context, path string) error
 }


@@ -51,6 +51,7 @@ type CommonStruct struct {
         LogSetLevel func(context.Context, string, string) error `perm:"write"`

         Shutdown func(context.Context) error `perm:"admin"`
+        Closing func(context.Context) (<-chan struct{}, error) `perm:"read"`
     }
 }
@@ -112,6 +113,7 @@ type FullNodeStruct struct {
         ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
         ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
         ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
+        ClientMinerQueryOffer func(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) `perm:"read"`
         ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
         ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
         ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
@@ -195,7 +197,8 @@ type StorageMinerStruct struct {
         MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
         MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
         MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
-        MarketSetPrice func(context.Context, types.BigInt) error `perm:"admin"`
+        MarketSetAsk func(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
+        MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`

         PledgeSector func(context.Context) error `perm:"write"`
@@ -203,6 +206,7 @@ type StorageMinerStruct struct {
         SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"`
         SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"`
         SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"`
+        SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`

         WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm
         WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"`
@@ -223,6 +227,9 @@ type StorageMinerStruct struct {
         DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
         DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
         DealsSetAcceptingStorageDeals func(context.Context, bool) error `perm:"admin"`
+        DealsSetAcceptingRetrievalDeals func(context.Context, bool) error `perm:"admin"`
+        DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"admin"`
+        DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"read"`

         StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
     }
@@ -242,7 +249,9 @@ type WorkerStruct struct {
         SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
         SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
         SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
-        FinalizeSector func(context.Context, abi.SectorID) error `perm:"admin"`
+        FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"`
+        ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"`
+        Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
         MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`

         UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
@@ -313,6 +322,10 @@ func (c *CommonStruct) Shutdown(ctx context.Context) error {
     return c.Internal.Shutdown(ctx)
 }

+func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
+    return c.Internal.Closing(ctx)
+}
+
 // FullNodeStruct

 func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) {
@@ -331,6 +344,10 @@ func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]ap
     return c.Internal.ClientFindData(ctx, root)
 }

+func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) {
+    return c.Internal.ClientMinerQueryOffer(ctx, root, miner)
+}
+
 func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
     return c.Internal.ClientStartDeal(ctx, params)
 }
@@ -773,6 +790,10 @@ func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNum
     return c.Internal.SectorsUpdate(ctx, id, state)
 }

+func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error {
+    return c.Internal.SectorRemove(ctx, number)
+}
+
 func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
     return c.Internal.WorkerConnect(ctx, url)
 }
@@ -841,8 +862,12 @@ func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]s
     return c.Internal.MarketListIncompleteDeals(ctx)
 }

-func (c *StorageMinerStruct) MarketSetPrice(ctx context.Context, p types.BigInt) error {
-    return c.Internal.MarketSetPrice(ctx, p)
+func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
+    return c.Internal.MarketSetAsk(ctx, price, duration, minPieceSize, maxPieceSize)
+}
+
+func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
+    return c.Internal.MarketGetAsk(ctx)
 }

 func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
@@ -857,6 +882,18 @@ func (c *StorageMinerStruct) DealsSetAcceptingStorageDeals(ctx context.Context,
     return c.Internal.DealsSetAcceptingStorageDeals(ctx, b)
 }

+func (c *StorageMinerStruct) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
+    return c.Internal.DealsSetAcceptingRetrievalDeals(ctx, b)
+}
+
+func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
+    return c.Internal.DealsPieceCidBlocklist(ctx)
+}
+
+func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
+    return c.Internal.DealsSetPieceCidBlocklist(ctx, cids)
+}
+
 func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
     return c.Internal.StorageAddLocal(ctx, path)
 }
@@ -895,8 +932,16 @@ func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o
     return w.Internal.SealCommit2(ctx, sector, c1o)
 }

-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
-    return w.Internal.FinalizeSector(ctx, sector)
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+    return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
+}
+
+func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+    return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
+}
+
+func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
+    return w.Internal.Remove(ctx, sector)
 }

 func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error {


@@ -73,9 +73,13 @@ func init() {
     addExample(bitfield.NewFromSet([]uint64{5}))
     addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+    addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
     addExample(abi.ChainEpoch(10101))
     addExample(crypto.SigTypeBLS)
     addExample(int64(9))
+    addExample(12.3)
+    addExample(123)
+    addExample(uintptr(0))
     addExample(abi.MethodNum(1))
     addExample(exitcode.ExitCode(0))
     addExample(crypto.DomainSeparationTag_ElectionProofProduction)
@@ -94,17 +98,17 @@ func init() {
     addExample(api.PCHInbound)
     addExample(time.Minute)
     addExample(&types.ExecutionTrace{
-        Msg:    exampleValue(reflect.TypeOf(&types.Message{})).(*types.Message),
-        MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{})).(*types.MessageReceipt),
+        Msg:    exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
+        MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
     })
     addExample(map[string]types.Actor{
-        "t01236": exampleValue(reflect.TypeOf(types.Actor{})).(types.Actor),
+        "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
     })
     addExample(map[string]api.MarketDeal{
-        "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{})).(api.MarketDeal),
+        "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
     })
     addExample(map[string]api.MarketBalance{
-        "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{})).(api.MarketBalance),
+        "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
     })

     maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
@@ -117,7 +121,7 @@ func init() {
 }

-func exampleValue(t reflect.Type) interface{} {
+func exampleValue(t, parent reflect.Type) interface{} {
     v, ok := ExampleValues[t]
     if ok {
         return v
@@ -126,25 +130,25 @@ func exampleValue(t reflect.Type) interface{} {
     switch t.Kind() {
     case reflect.Slice:
         out := reflect.New(t).Elem()
-        reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem())))
+        reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
         return out.Interface()
     case reflect.Chan:
-        return exampleValue(t.Elem())
+        return exampleValue(t.Elem(), nil)
     case reflect.Struct:
-        es := exampleStruct(t)
+        es := exampleStruct(t, parent)
         v := reflect.ValueOf(es).Elem().Interface()
         ExampleValues[t] = v
         return v
     case reflect.Array:
         out := reflect.New(t).Elem()
         for i := 0; i < t.Len(); i++ {
-            out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem())))
+            out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
         }
         return out.Interface()
     case reflect.Ptr:
         if t.Elem().Kind() == reflect.Struct {
-            es := exampleStruct(t.Elem())
+            es := exampleStruct(t.Elem(), t)
             //ExampleValues[t] = es
             return es
         }
@@ -155,12 +159,15 @@ func exampleValue(t reflect.Type) interface{} {
     panic(fmt.Sprintf("No example value for type: %s", t))
 }

-func exampleStruct(t reflect.Type) interface{} {
+func exampleStruct(t, parent reflect.Type) interface{} {
     ns := reflect.New(t)
     for i := 0; i < t.NumField(); i++ {
         f := t.Field(i)
+        if f.Type == parent {
+            continue
+        }
         if strings.Title(f.Name) == f.Name {
-            ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type)))
+            ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
         }
     }
@@ -286,17 +293,17 @@ func main() {
         ft := m.Func.Type()
         for j := 2; j < ft.NumIn(); j++ {
             inp := ft.In(j)
-            args = append(args, exampleValue(inp))
+            args = append(args, exampleValue(inp, nil))
         }

-        v, err := json.Marshal(args)
+        v, err := json.MarshalIndent(args, "", "  ")
         if err != nil {
             panic(err)
         }

-        outv := exampleValue(ft.Out(0))
+        outv := exampleValue(ft.Out(0), nil)

-        ov, err := json.Marshal(outv)
+        ov, err := json.MarshalIndent(outv, "", "  ")
         if err != nil {
             panic(err)
         }
@@ -318,6 +325,15 @@ func main() {
         return groupslice[i].GroupName < groupslice[j].GroupName
     })

+    fmt.Printf("# Groups\n")
+
+    for _, g := range groupslice {
+        fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
+
+        for _, method := range g.Methods {
+            fmt.Printf("  * [%s](#%s)\n", method.Name, method.Name)
+        }
+    }
+
     for _, g := range groupslice {
         g := g
         fmt.Printf("## %s\n", g.GroupName)
@@ -331,8 +347,17 @@ func main() {
             fmt.Printf("### %s\n", m.Name)
             fmt.Printf("%s\n\n", m.Comment)

+            if strings.Count(m.InputExample, "\n") > 0 {
+                fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
+            } else {
                 fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
+            }
+
+            if strings.Count(m.ResponseExample, "\n") > 0 {
+                fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
+            } else {
                 fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
+            }
         }
     }
 }


@@ -8,6 +8,7 @@ import (
     "math/rand"
     "os"
     "path/filepath"
+    "sync/atomic"
     "testing"
     "time"
@@ -52,11 +53,11 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
     }
     time.Sleep(time.Second)

-    mine := true
+    mine := int64(1)
     done := make(chan struct{})
     go func() {
         defer close(done)
-        for mine {
+        for atomic.LoadInt64(&mine) == 1 {
             time.Sleep(blocktime)
             if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
                 t.Error(err)
@@ -66,7 +67,7 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
     makeDeal(t, ctx, 6, client, miner, carExport)

-    mine = false
+    atomic.AddInt64(&mine, -1)
     fmt.Println("shutting down mining")
     <-done
 }
@@ -89,12 +90,12 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
     }
     time.Sleep(time.Second)

-    mine := true
+    mine := int64(1)
     done := make(chan struct{})
     go func() {
         defer close(done)
-        for mine {
+        for atomic.LoadInt64(&mine) == 1 {
             time.Sleep(blocktime)
             if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
                 t.Error(err)
@@ -105,7 +106,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
     makeDeal(t, ctx, 6, client, miner, false)
     makeDeal(t, ctx, 7, client, miner, false)

-    mine = false
+    atomic.AddInt64(&mine, -1)
     fmt.Println("shutting down mining")
     <-done
 }
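The change above replaces a plain `bool` shared between the test body and the mining goroutine with an `int64` accessed through `sync/atomic`, which removes the data race. A standalone sketch of the same stop-flag pattern (the sleep durations are placeholders):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	// 1 means "keep mining"; the worker only reads the flag atomically and the
	// test only writes it atomically, so there is no data race.
	mine := int64(1)
	done := make(chan struct{})

	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(10 * time.Millisecond) // stand-in for MineOne
		}
	}()

	// ... run the body of the test ...
	time.Sleep(50 * time.Millisecond)

	atomic.StoreInt64(&mine, 0) // same effect as the AddInt64(&mine, -1) above
	<-done
	fmt.Println("mining goroutine stopped cleanly")
}
```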


@@ -126,6 +126,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
     minedTwo := make(chan struct{})

     go func() {
+        doneMinedTwo := false
         defer close(done)

         prevExpect := 0
@@ -175,9 +176,9 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
                 time.Sleep(blocktime)
             }

-            if prevExpect == 2 && expect == 2 && minedTwo != nil {
+            if prevExpect == 2 && expect == 2 && !doneMinedTwo {
                 close(minedTwo)
-                minedTwo = nil
+                doneMinedTwo = true
             }

             prevExpect = expect


@@ -13,6 +13,10 @@ import (
 )

 func BuiltinBootstrap() ([]peer.AddrInfo, error) {
+    if DisableBuiltinAssets {
+        return nil, nil
+    }
+
     var out []peer.AddrInfo

     b := rice.MustFindBox("bootstrap")


@@ -1,12 +1,12 @@
-/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs
-/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs
-/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr
-/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr
-/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym
-/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym
-/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN
-/ip4/86.109.15.55/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN
-/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD
-/ip4/139.178.84.41/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD
-/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ
-/ip4/136.144.49.131/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ
+/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
+/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
+/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
+/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
+/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
+/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
+/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
+/ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
+/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
+/ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
+/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
+/ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R

build/flags.go (new file, 15 lines)

@@ -0,0 +1,15 @@
+package build
+
+// DisableBuiltinAssets disables the resolution of go.rice boxes that store
+// built-in assets, such as proof parameters, bootstrap peers, genesis blocks,
+// etc.
+//
+// When this value is set to true, it is expected that the user will
+// provide any such configurations through the Lotus API itself.
+//
+// This is useful when you're using Lotus as a library, such as to orchestrate
+// test scenarios, or for other purposes where you don't need to use the
+// defaults shipped with the binary.
+//
+// For this flag to be effective, it must be enabled _before_ instantiating Lotus.
+var DisableBuiltinAssets = false
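A minimal sketch of how a library consumer might use this flag, per the comment above (the surrounding node-construction code is elided and assumed):

```go
package main

import (
	"github.com/filecoin-project/lotus/build"
)

func init() {
	// Must be flipped before any Lotus component resolves its rice boxes,
	// i.e. before the node is instantiated. The embedding application is then
	// expected to supply bootstrap peers, genesis, proof parameters, etc.
	// through the API instead.
	build.DisableBuiltinAssets = true
}
```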

Binary file not shown.


@@ -121,4 +121,11 @@ const VerifSigCacheSize = 32000
 const BlockMessageLimit = 512
 const BlockGasLimit = 100_000_000_000

-var DrandChain = `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`
+var DrandConfig = dtypes.DrandConfig{
+    Servers: []string{
+        "https://pl-eu.testnet.drand.sh",
+        "https://pl-us.testnet.drand.sh",
+        "https://pl-sin.testnet.drand.sh",
+    },
+    ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`,
+}


@@ -12,9 +12,8 @@ import (
 )

 func init() {
-    power.ConsensusMinerMinPower = big.NewInt(1024 << 20)
+    power.ConsensusMinerMinPower = big.NewInt(1024 << 30)
     miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
-        abi.RegisteredSealProof_StackedDrg512MiBV1: {},
         abi.RegisteredSealProof_StackedDrg32GiBV1: {},
         abi.RegisteredSealProof_StackedDrg64GiBV1: {},
     }


@@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
 }

 // APIVersion is a semver version of the rpc api exposed
-var APIVersion Version = newVer(0, 3, 0)
+var APIVersion Version = newVer(0, 4, 0)

 //nolint:varcheck,deadcode
 const (


@@ -17,6 +17,10 @@ type Response struct {
     Err error
 }

+// RandomBeacon represents a system that provides randomness to Lotus.
+// Other components interrogate the RandomBeacon to acquire randomness that's
+// valid for a specific chain epoch. Also to verify beacon entries that have
+// been posted on chain.
 type RandomBeacon interface {
     Entry(context.Context, uint64) <-chan Response
     VerifyEntry(types.BeaconEntry, types.BeaconEntry) error
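A small sketch of how a caller might consume this interface as documented above. The helper is hypothetical; it only assumes the `Entry` method shown here and the `Response`/`BeaconEntry` fields from this package:

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/chain/beacon"
)

// waitForRound asks the beacon for the entry of a given round and blocks until
// the Response arrives on the returned channel, or the context is cancelled.
func waitForRound(ctx context.Context, b beacon.RandomBeacon, round uint64) ([]byte, error) {
	select {
	case resp := <-b.Entry(ctx, round):
		if resp.Err != nil {
			return nil, resp.Err
		}
		return resp.Entry.Data, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
```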


@@ -19,31 +19,15 @@ import (
     logging "github.com/ipfs/go-log"
     pubsub "github.com/libp2p/go-libp2p-pubsub"

-    "github.com/filecoin-project/lotus/build"
+    "github.com/filecoin-project/specs-actors/actors/abi"
+
     "github.com/filecoin-project/lotus/chain/beacon"
     "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/specs-actors/actors/abi"
+    "github.com/filecoin-project/lotus/node/modules/dtypes"
 )

 var log = logging.Logger("drand")

-var drandServers = []string{
-    "https://pl-eu.testnet.drand.sh",
-    "https://pl-us.testnet.drand.sh",
-    "https://pl-sin.testnet.drand.sh",
-}
-
-var drandChain *dchain.Info
-
-func init() {
-    var err error
-    drandChain, err = dchain.InfoFromJSON(bytes.NewReader([]byte(build.DrandChain)))
-    if err != nil {
-        panic("could not unmarshal chain info: " + err.Error())
-    }
-}
-
 type drandPeer struct {
     addr string
     tls  bool
@@ -57,6 +41,13 @@ func (dp *drandPeer) IsTLS() bool {
     return dp.tls
 }

+// DrandBeacon connects Lotus with a drand network in order to provide
+// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
+//
+// We connect to drand peers via their public HTTP endpoints. The peers are
+// enumerated in the drandServers variable.
+//
+// The root trust for the Drand chain is configured from build.DrandChain.
 type DrandBeacon struct {
     client dclient.Client
@@ -73,16 +64,21 @@ type DrandBeacon struct {
     localCache map[uint64]types.BeaconEntry
 }

-func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub) (*DrandBeacon, error) {
+func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) {
     if genesisTs == 0 {
         panic("what are you doing this cant be zero")
     }

+    drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON)))
+    if err != nil {
+        return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err)
+    }
+
     dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger(
         log.SugaredLogger.Desugar(), zapcore.InfoLevel))

     var clients []dclient.Client
-    for _, url := range drandServers {
+    for _, url := range config.Servers {
         hc, err := hclient.NewWithInfo(url, drandChain, nil)
         if err != nil {
             return nil, xerrors.Errorf("could not create http drand client: %w", err)


@@ -7,10 +7,13 @@ import (
     dchain "github.com/drand/drand/chain"
     hclient "github.com/drand/drand/client/http"
     "github.com/stretchr/testify/assert"
+
+    "github.com/filecoin-project/lotus/build"
 )

 func TestPrintGroupInfo(t *testing.T) {
-    c, err := hclient.New(drandServers[0], nil, nil)
+    server := build.DrandConfig.Servers[0]
+    c, err := hclient.New(server, nil, nil)
     assert.NoError(t, err)
     cg := c.(interface {
         FetchChainInfo(groupHash []byte) (*dchain.Info, error)


@@ -10,6 +10,7 @@ import (
     "golang.org/x/xerrors"

     cborutil "github.com/filecoin-project/go-cbor-util"
+
     "github.com/filecoin-project/lotus/chain/store"
     "github.com/filecoin-project/lotus/chain/types"
@@ -27,6 +28,24 @@ const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"

 const BlockSyncMaxRequestLength = 800

+// BlockSyncService is the component that services BlockSync requests from
+// peers.
+//
+// BlockSync is the basic chain synchronization protocol of Filecoin. BlockSync
+// is an RPC-oriented protocol, with a single operation to request blocks.
+//
+// A request contains a start anchor block (referred to with a CID), and a
+// amount of blocks requested beyond the anchor (including the anchor itself).
+//
+// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
+// two options at the moment:
+//
+// - include block contents
+// - include block messages
+//
+// The response will include a status code, an optional message, and the
+// response payload in case of success. The payload is a slice of serialized
+// tipsets.
 type BlockSyncService struct {
     cs *store.ChainStore
 }
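For readers unfamiliar with the protocol described in the comment above, here is an illustrative Go sketch of the request/response shapes it implies. The field names are assumptions made for the sketch; the canonical struct definitions live in the blocksync package itself.

```go
package example

import "github.com/ipfs/go-cid"

// Illustrative wire shapes only, mirroring the protocol description above.
type blockSyncRequest struct {
	Start         []cid.Cid // CID(s) of the anchor block(s) to start from
	RequestLength uint64    // number of tipsets requested, anchor included
	Options       uint64    // 64-bit bitfield selecting the options below
}

const (
	includeBlockContents = 1 << 0 // return the block headers/contents
	includeBlockMessages = 1 << 1 // return the messages referenced by the blocks
)

type blockSyncResponse struct {
	Status  uint64   // status code; zero is assumed here to mean success
	Message string   // optional human-readable detail
	Chain   [][]byte // serialized tipsets, present on success
}
```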


@@ -64,6 +64,11 @@ func (bs *BlockSync) processStatus(req *BlockSyncRequest, res *BlockSyncResponse
     }
 }

+// GetBlocks fetches count blocks from the network, from the provided tipset
+// *backwards*, returning as many tipsets as count.
+//
+// {hint/usage}: This is used by the Syncer during normal chain syncing and when
+// resolving forks.
 func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
     ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
     defer span.End()
@@ -80,7 +85,9 @@ func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count i
         Options: BSOptBlocks,
     }

+    // this peerset is sorted by latency and failure counting.
     peers := bs.getPeers()
+
     // randomize the first few peers so we don't always pick the same peer
     shufflePrefix(peers)
@@ -356,6 +363,7 @@ func (bs *BlockSync) RemovePeer(p peer.ID) {
     bs.syncPeers.removePeer(p)
 }

+// getPeers returns a preference-sorted set of peers to query.
 func (bs *BlockSync) getPeers() []peer.ID {
     return bs.syncPeers.prefSortedPeers()
 }


@@ -84,6 +84,9 @@ type calledEvents struct {
 }

 func (e *calledEvents) headChangeCalled(rev, app []*types.TipSet) error {
+    e.lk.Lock()
+    defer e.lk.Unlock()
+
     for _, ts := range rev {
         e.handleReverts(ts)
         e.at = ts.Height()
@@ -134,7 +137,6 @@ func (e *calledEvents) checkNewCalls(ts *types.TipSet) {
     e.messagesForTs(pts, func(msg *types.Message) {
         // TODO: provide receipts
         for tid, matchFns := range e.matchers {
-
             var matched bool
             for _, matchFn := range matchFns {


@@ -26,12 +26,15 @@ type heightEvents struct {
 }

 func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
     ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
     defer span.End()
     span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
     span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
     span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
+
+    e.lk.Lock()
+    defer e.lk.Unlock()
     for _, ts := range rev {
         // TODO: log error if h below gcconfidence
         // revert height-based triggers
@@ -40,7 +43,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
         for _, tid := range e.htHeights[h] {
             ctx, span := trace.StartSpan(ctx, "events.HeightRevert")

-            err := e.heightTriggers[tid].revert(ctx, ts)
+            rev := e.heightTriggers[tid].revert
+            e.lk.Unlock()
+            err := rev(ctx, ts)
+            e.lk.Lock()

             e.heightTriggers[tid].called = false
             span.End()
@@ -98,8 +104,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
             ctx, span := trace.StartSpan(ctx, "events.HeightApply")
             span.AddAttributes(trace.BoolAttribute("immediate", false))

-            err = hnd.handle(ctx, incTs, h)
+            handle := hnd.handle
+            e.lk.Unlock()
+            err = handle(ctx, incTs, h)
+            e.lk.Lock()

             span.End()

             if err != nil {
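The hunks above take the events lock for the whole head-change pass but drop it around each registered trigger, so a trigger that blocks or calls back into the events system cannot deadlock on the same mutex. A generic sketch of that pattern (names are illustrative, and, as in the code above, it assumes the handler list is not mutated while the loop runs):

```go
package example

import "sync"

type triggers struct {
	lk       sync.Mutex
	handlers []func() error
}

// fire walks the registered handlers, holding the lock only while touching
// shared state and releasing it around each user callback.
func (t *triggers) fire() error {
	t.lk.Lock()
	defer t.lk.Unlock()

	for i := range t.handlers {
		h := t.handlers[i] // copy the callback while the lock is held

		t.lk.Unlock()
		err := h() // user code runs without our lock held
		t.lk.Lock()

		if err != nil {
			return err
		}
	}
	return nil
}
```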


@@ -32,8 +32,11 @@ func (fts *FullTipSet) Cids() []cid.Cid {
     return cids
 }

+// TipSet returns a narrower view of this FullTipSet elliding the block
+// messages.
 func (fts *FullTipSet) TipSet() *types.TipSet {
     if fts.tipset != nil {
+        // FIXME: fts.tipset is actually never set. Should it memoize?
         return fts.tipset
     }


@@ -34,7 +34,7 @@ type lbEntry struct {
     target types.TipSetKey
 }

-func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
+func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
     if from.Height()-to <= ci.skipLength {
         return ci.walkBack(from, to)
     }


@@ -52,6 +52,15 @@ var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
 // ReorgNotifee represents a callback that gets called upon reorgs.
 type ReorgNotifee func(rev, app []*types.TipSet) error

+// ChainStore is the main point of access to chain data.
+//
+// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
+// latest head tipset references) being tracked in the Datastore (key-value
+// store).
+//
+// To alleviate disk access, the ChainStore has two ARC caches:
+//   1. a tipset cache
+//   2. a block => messages references cache.
 type ChainStore struct {
     bs bstore.Blockstore
     ds dstore.Datastore
@@ -266,6 +275,9 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
     return nil
 }

+// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
+// internal state as our new head, if and only if it is heavier than the current
+// head.
 func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
     cs.heaviestLk.Lock()
     defer cs.heaviestLk.Unlock()
@@ -331,6 +343,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
     return out
 }

+// takeHeaviestTipSet actually sets the incoming tipset as our head both in
+// memory and in the ChainStore. It also sends a notification to deliver to
+// ReorgNotifees.
 func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
     _, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
     defer span.End()
@@ -368,6 +383,7 @@ func (cs *ChainStore) SetHead(ts *types.TipSet) error {
     return cs.takeHeaviestTipSet(context.TODO(), ts)
 }

+// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
 func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
     for _, c := range ts.Cids() {
         has, err := cs.bs.Has(c)
@@ -382,6 +398,8 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
     return true, nil
 }

+// GetBlock fetches a BlockHeader with the supplied CID. It returns
+// blockstore.ErrNotFound if the block was not found in the BlockStore.
 func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
     sb, err := cs.bs.Get(c)
     if err != nil {
@@ -474,6 +492,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
     return leftChain, rightChain, nil
 }

+// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
 func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
     cs.heaviestLk.Lock()
     defer cs.heaviestLk.Unlock()

View File

@ -53,6 +53,29 @@ var log = logging.Logger("chain")
var LocalIncoming = "incoming" var LocalIncoming = "incoming"
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn't deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct { type Syncer struct {
// The interface for accessing and putting tipsets into local storage // The interface for accessing and putting tipsets into local storage
store *store.ChainStore store *store.ChainStore
@ -85,6 +108,7 @@ type Syncer struct {
verifier ffiwrapper.Verifier verifier ffiwrapper.Verifier
} }
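As a rough, self-contained illustration of the fork-choice rule quoted in the comment above ("heaviest weight, so long as it hasn't deviated one finality threshold"), the sketch below shows the shape of that decision. The tipSet struct, the shouldSwitchHead helper, and the height-difference check are simplifications invented for this example; the real comparison lives in the ChainStore/Syncer code.

```go
package main

import (
	"fmt"
	"math/big"
)

// finality is the 900-epoch threshold mentioned in the comment above.
const finality = 900

type tipSet struct {
	Height int64
	Weight *big.Int
}

// shouldSwitchHead adopts the candidate only if it is strictly heavier than
// our current head and does not reach back past the finality threshold.
func shouldSwitchHead(head, candidate tipSet) bool {
	if candidate.Weight.Cmp(head.Weight) <= 0 {
		return false // not heavier: keep the current head
	}
	if head.Height-candidate.Height > finality {
		return false // deviates beyond finality: reject
	}
	return true
}

func main() {
	head := tipSet{Height: 10000, Weight: big.NewInt(500)}
	candidate := tipSet{Height: 10002, Weight: big.NewInt(520)}
	fmt.Println(shouldSwitchHead(head, candidate)) // true
}
```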
// NewSyncer creates a new Syncer object.
func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) { func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis() gen, err := sm.ChainStore().GetGenesis()
if err != nil { if err != nil {
@ -182,6 +206,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return true return true
} }
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated into our view of the chain.
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming) sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10) out := make(chan *types.BlockHeader, 10)
@ -209,11 +238,15 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe
return out, nil return out, nil
} }
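The subscribe-and-forward behaviour documented for IncomingBlocks (bridge an in-process event bus to a bounded channel, stop when the context ends) can be sketched in isolation like this; a plain channel stands in for the pubsub subscription the actual code uses:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// incomingBlocks bridges an event source to a buffered output channel and
// stops forwarding once the context is cancelled (illustrative sketch).
func incomingBlocks(ctx context.Context, sub <-chan string) <-chan string {
	out := make(chan string, 10)
	go func() {
		defer close(out)
		for {
			select {
			case blk, ok := <-sub:
				if !ok {
					return
				}
				select {
				case out <- blk:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	src := make(chan string, 2)
	src <- "header-1"
	src <- "header-2"
	close(src)

	for h := range incomingBlocks(ctx, src) {
		fmt.Println("received", h)
	}
}
```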
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
} }
// Collect the CIDs of both types of messages separately: BLS and Secpk.
var bcids, scids []cbg.CBORMarshaler var bcids, scids []cbg.CBORMarshaler
for _, m := range fblk.BlsMessages { for _, m := range fblk.BlsMessages {
c := cbg.CborCid(m.Cid()) c := cbg.CborCid(m.Cid())
@ -231,11 +264,14 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
blockstore := syncer.store.Blockstore() blockstore := syncer.store.Blockstore()
bs := cbor.NewCborStore(blockstore) bs := cbor.NewCborStore(blockstore)
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(bs, bcids, scids) smroot, err := computeMsgMeta(bs, bcids, scids)
if err != nil { if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err) return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
} }
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot { if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
} }
@ -345,6 +381,8 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
return fts, nil return fts, nil
} }
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) { func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) {
ctx := context.TODO() ctx := context.TODO()
bmroot, err := amt.FromArray(ctx, bs, bmsgCids) bmroot, err := amt.FromArray(ctx, bs, bmsgCids)
@ -368,14 +406,24 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (
return mrcid, nil return mrcid, nil
} }
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (BlockSync) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil return fts, nil
} }
// fall back to the network.
return syncer.Bsync.GetFullTipSet(ctx, p, tsk) return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
} }
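FetchTipSet's behaviour as described above reduces to a try-local-then-network fallback. A minimal generic sketch under invented localStore/networkFetcher interfaces (these are not the Lotus APIs):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found locally")

type localStore interface {
	Load(key string) (string, error)
}

type networkFetcher interface {
	Fetch(ctx context.Context, peer, key string) (string, error)
}

// fetchTipSet mirrors the documented behaviour: prefer the local store, and
// only query the given peer over the network when the local load fails.
func fetchTipSet(ctx context.Context, ls localStore, nf networkFetcher, peer, key string) (string, error) {
	if v, err := ls.Load(key); err == nil {
		return v, nil
	}
	return nf.Fetch(ctx, peer, key)
}

type mapStore map[string]string

func (m mapStore) Load(k string) (string, error) {
	if v, ok := m[k]; ok {
		return v, nil
	}
	return "", errNotFound
}

type fakeNet struct{}

func (fakeNet) Fetch(_ context.Context, peer, key string) (string, error) {
	return "fetched " + key + " from " + peer, nil
}

func main() {
	out, _ := fetchTipSet(context.Background(), mapStore{}, fakeNet{}, "peer-A", "tsk-1")
	fmt.Println(out)
}
```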
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. If any of the blocks are not
// found locally, the entire call fails with blockstore.ErrNotFound.
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk) ts, err := syncer.store.LoadTipSet(tsk)
if err != nil { if err != nil {
@ -400,6 +448,12 @@ func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet,
return fts, nil return fts, nil
} }
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync") ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End() defer span.End()
@ -466,7 +520,11 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
return nil return nil
} }
var futures []async.ErrorFuture
for _, b := range fts.Blocks { for _, b := range fts.Blocks {
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b); err != nil { if err := syncer.ValidateBlock(ctx, b); err != nil {
if isPermanent(err) { if isPermanent(err) {
syncer.bad.Add(b.Cid(), err.Error()) syncer.bad.Add(b.Cid(), err.Error())
@ -477,6 +535,13 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
} }
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
} }
return nil return nil
} }
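The change above fans block validation out to one goroutine per block via the async helper and then awaits every future, so one slow block no longer serializes the whole tipset. The same fan-out/fan-in shape, sketched with golang.org/x/sync/errgroup instead of the async package used in the diff (validateBlock here is a stand-in, not Syncer.ValidateBlock):

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"golang.org/x/sync/errgroup"
)

func validateBlock(_ context.Context, b string) error {
	if strings.HasPrefix(b, "bad") {
		return fmt.Errorf("block %s failed validation", b)
	}
	return nil
}

// validateTipSet validates every block of a tipset concurrently and returns
// the first error encountered, mirroring the futures-based diff above.
func validateTipSet(ctx context.Context, blocks []string) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, b := range blocks {
		b := b // rebind to a scoped variable, as in the diff
		g.Go(func() error {
			return validateBlock(ctx, b)
		})
	}
	return g.Wait()
}

func main() {
	err := validateTipSet(context.Background(), []string{"blk-1", "blk-2", "blk-3"})
	fmt.Println("validation error:", err)
}
```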
@ -993,6 +1058,39 @@ func extractSyncState(ctx context.Context) *SyncerState {
return nil return nil
} }
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `from` is the heaviest/projected/target tipset we have learned about, and
// `to` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks if portions of the chain are in our ChainStore, falling
// back to the network to retrieve the missing parts. If, during the process, any
// portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/naming}: `from` and `to` are in inverse order. `from` is the highest,
// and `to` is the lowest. This method traverses the chain backwards.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards; for each tipset:
// 3a. Load it from the chainstore; if found, move on to its parent.
// 3b. Query our peers via BlockSync in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
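A compressed, self-contained sketch of the traversal described above: walk back from `from` towards `to`, preferring the local store, batching network requests with a window capped at 500, and short-circuiting on denylisted blocks. The header struct, integer heights, and the fetch callback are illustrative stand-ins for tipsets, CIDs, and BlockSync:

```go
package main

import (
	"errors"
	"fmt"
)

const maxWindow = 500 // maximum tipsets requested per batch, per the comment above

type header struct {
	Height int64
	Parent int64 // height of the parent tipset; stands in for parent CIDs
}

// collectHeaders walks backwards from `from` (highest) to `to` (lowest),
// loading locally when possible and fetching batches otherwise, while
// refusing anything on the denylist.
func collectHeaders(from, to int64, local map[int64]header, fetch func(at, count int64) []header, denied map[int64]bool) ([]header, error) {
	var out []header
	at := from
	for at > to {
		if denied[at] {
			return nil, errors.New("chain contains a denylisted block")
		}
		if h, ok := local[at]; ok { // already in our ChainStore
			out = append(out, h)
			at = h.Parent
			continue
		}
		window := at - to
		if window > maxWindow {
			window = maxWindow
		}
		for _, h := range fetch(at, window) { // batched network request
			if denied[h.Height] {
				return nil, errors.New("received a denylisted block")
			}
			out = append(out, h)
			at = h.Parent
		}
	}
	return out, nil
}

func main() {
	fetch := func(at, count int64) []header {
		var hs []header
		for i := int64(0); i < count; i++ {
			hs = append(hs, header{Height: at - i, Parent: at - i - 1})
		}
		return hs
	}
	hs, err := collectHeaders(1200, 0, map[int64]header{}, fetch, map[int64]bool{})
	fmt.Println(len(hs), err) // 1200 <nil>
}
```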
func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders") ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End() defer span.End()
@ -1009,6 +1107,8 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to
} }
} }
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range from.Parents().Cids() { for _, pcid := range from.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok { if reason, ok := syncer.bad.Has(pcid); ok {
markBad("linked to %s", pcid) markBad("linked to %s", pcid)
@ -1079,8 +1179,8 @@ loop:
} }
// NB: GetBlocks validates that the blocks are in-fact the ones we // NB: GetBlocks validates that the blocks are in-fact the ones we
// requested, and that they are correctly linked to eachother. It does // requested, and that they are correctly linked to one another. It does
// not validate any state transitions // not validate any state transitions.
window := 500 window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window { if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap window = gap
@ -1121,7 +1221,6 @@ loop:
at = blks[len(blks)-1].Parents() at = blks[len(blks)-1].Parents()
} }
// We have now ascertained that this is *not* a 'fast forward'
if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) { if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) {
last := blockSet[len(blockSet)-1] last := blockSet[len(blockSet)-1]
if last.Parents() == to.Parents() { if last.Parents() == to.Parents() {
@ -1129,6 +1228,8 @@ loop:
return blockSet, nil return blockSet, nil
} }
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height()) log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height())
fork, err := syncer.syncFork(ctx, last, to) fork, err := syncer.syncFork(ctx, last, to)
if err != nil { if err != nil {
@ -1150,6 +1251,12 @@ loop:
var ErrForkTooLong = fmt.Errorf("fork longer than threshold") var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
// denylist. Otherwise, we find the common ancestor and add the missing chain
// fragment, up to the fork point, to the returned []TipSet.
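A minimal sketch of the fork resolution described above: scan backwards from the fork tip for an ancestor we already know, bounded by the fork-length threshold. Integer heights, the parentOf callback, and the threshold value are illustrative; build.ForkLengthThreshold and the real denylisting happen elsewhere in the Syncer:

```go
package main

import (
	"errors"
	"fmt"
)

// forkLengthThreshold stands in for build.ForkLengthThreshold (illustrative value).
const forkLengthThreshold = 100

var errForkTooLong = errors.New("fork longer than threshold")

// syncFork scans backwards from the fork tip, looking for the first ancestor
// we already have. If no common ancestor appears within the threshold, the
// fork is rejected, mirroring the behaviour described above.
func syncFork(forkTip int, parentOf func(int) int, known map[int]bool) ([]int, error) {
	var fragment []int
	cur := forkTip
	for i := 0; i < forkLengthThreshold; i++ {
		if known[cur] {
			return fragment, nil // found the common ancestor; fragment links the fork to it
		}
		fragment = append(fragment, cur)
		cur = parentOf(cur)
	}
	return nil, errForkTooLong
}

func main() {
	known := map[int]bool{940: true}
	frag, err := syncFork(950, func(h int) int { return h - 1 }, known)
	fmt.Println(frag, err) // [950 949 ... 941] <nil>
}
```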
func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold)) tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold))
if err != nil { if err != nil {
@ -1301,6 +1408,25 @@ func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error {
return nil return nil
} }
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
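The three stages listed above run strictly in order; a tiny sketch of that pipeline, with placeholder stage bodies (only the stage names come from the comment):

```go
package main

import (
	"context"
	"fmt"
)

// collectChain wires the three documented stages together in order; each stage
// here is a no-op placeholder standing in for the real header/message logic.
func collectChain(ctx context.Context) error {
	stages := []struct {
		name string
		run  func(context.Context) error
	}{
		{"StageHeaders", func(context.Context) error { return nil }},        // walk back to a common tipset
		{"StagePersistHeaders", func(context.Context) error { return nil }}, // persist collected headers
		{"StageMessages", func(context.Context) error { return nil }},       // fetch full blocks with messages
	}
	for _, s := range stages {
		fmt.Println("entering", s.name)
		if err := s.run(ctx); err != nil {
			return fmt.Errorf("%s failed: %w", s.name, err)
		}
	}
	return nil
}

func main() {
	if err := collectChain(context.Background()); err != nil {
		fmt.Println(err)
	}
}
```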
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain") ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End() defer span.End()
@ -1350,9 +1476,8 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
if build.InsecurePoStValidation { if build.InsecurePoStValidation {
return nil return nil
} else {
return gen.VerifyVRF(ctx, worker, rand, evrf)
} }
return gen.VerifyVRF(ctx, worker, rand, evrf)
} }
func (syncer *Syncer) State() []SyncerState { func (syncer *Syncer) State() []SyncerState {
@ -1363,6 +1488,7 @@ func (syncer *Syncer) State() []SyncerState {
return out return out
} }
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) { func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, "manually marked bad") syncer.bad.Add(blk, "manually marked bad")
} }
@ -1370,7 +1496,7 @@ func (syncer *Syncer) MarkBad(blk cid.Cid) {
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
return syncer.bad.Has(blk) return syncer.bad.Has(blk)
} }
func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts cur := ts
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries cbe := cur.Blocks()[0].BeaconEntries

View File

@ -76,7 +76,7 @@ func SizeStr(bi BigInt) string {
} }
f, _ := r.Float64() f, _ := r.Float64()
return fmt.Sprintf("%.3g %s", f, byteSizeUnits[i]) return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i])
} }
var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"}

View File

@ -3,7 +3,12 @@ package types
import ( import (
"bytes" "bytes"
"math/big" "math/big"
"math/rand"
"strings"
"testing" "testing"
"time"
"github.com/docker/go-units"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -60,8 +65,10 @@ func TestSizeStr(t *testing.T) {
}{ }{
{0, "0 B"}, {0, "0 B"},
{1, "1 B"}, {1, "1 B"},
{1016, "1016 B"},
{1024, "1 KiB"}, {1024, "1 KiB"},
{2000, "1.95 KiB"}, {1000 * 1024, "1000 KiB"},
{2000, "1.953 KiB"},
{5 << 20, "5 MiB"}, {5 << 20, "5 MiB"},
{11 << 60, "11 EiB"}, {11 << 60, "11 EiB"},
} }
@ -71,6 +78,22 @@ func TestSizeStr(t *testing.T) {
} }
} }
func TestSizeStrUnitsSymmetry(t *testing.T) {
s := rand.NewSource(time.Now().UnixNano())
r := rand.New(s)
for i := 0; i < 1000000; i++ {
n := r.Uint64()
l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "")
r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "")
assert.NotContains(t, l, "e+")
assert.NotContains(t, r, "e+")
assert.Equal(t, l, r, "wrong formatting for %d", n)
}
}
func TestSizeStrBig(t *testing.T) { func TestSizeStrBig(t *testing.T) {
ZiB := big.NewInt(50000) ZiB := big.NewInt(50000)
ZiB = ZiB.Lsh(ZiB, 70) ZiB = ZiB.Lsh(ZiB, 70)

View File

@ -21,16 +21,16 @@ type ExecutionTrace struct {
type GasTrace struct { type GasTrace struct {
Name string Name string
Location []Loc Location []Loc `json:"loc"`
TotalGas int64 TotalGas int64 `json:"tg"`
ComputeGas int64 ComputeGas int64 `json:"cg"`
StorageGas int64 StorageGas int64 `json:"sg"`
TotalVirtualGas int64 TotalVirtualGas int64 `json:"vtg"`
VirtualComputeGas int64 VirtualComputeGas int64 `json:"vcg"`
VirtualStorageGas int64 VirtualStorageGas int64 `json:"vsg"`
TimeTaken time.Duration TimeTaken time.Duration `json:"tt"`
Extra interface{} `json:",omitempty"` Extra interface{} `json:"ex,omitempty"`
Callers []uintptr `json:"-"` Callers []uintptr `json:"-"`
} }

View File

@ -186,6 +186,15 @@ func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte
} }
func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) { func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
ps.chargeGas(newGasCharge("BatchVerifySeals", 0, 0)) // TODO: this is only called by the cron actor. Should we even charge gas? var gasChargeSum GasCharge
gasChargeSum.Name = "BatchVerifySeals"
ps.chargeGas(gasChargeSum) // TODO: this is only called by the cron actor. Should we even charge gas?
for _, svis := range inp {
for _, svi := range svis {
ch := ps.pl.OnVerifySeal(svi)
ps.chargeGas(newGasCharge("BatchVerifySingle", 0, 0).WithVirtual(ch.VirtualCompute+ch.ComputeGas, 0))
}
}
return ps.under.BatchVerifySeals(inp) return ps.under.BatchVerifySeals(inp)
} }

View File

@ -103,17 +103,17 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M
if methodNum != builtin.MethodSend { if methodNum != builtin.MethodSend {
ret += pl.sendInvokeMethod ret += pl.sendInvokeMethod
} }
return newGasCharge("OnMethodInvocation", ret, 0) return newGasCharge("OnMethodInvocation", ret, 0).WithVirtual(ret*15000, 0)
} }
// OnIpldGet returns the gas used for storing an object // OnIpldGet returns the gas used for storing an object
func (pl *pricelistV0) OnIpldGet(dataSize int) GasCharge { func (pl *pricelistV0) OnIpldGet(dataSize int) GasCharge {
return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0).WithExtra(dataSize) return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0).WithExtra(dataSize).WithVirtual(pl.ipldGetBase*13750+(pl.ipldGetPerByte*100), 0)
} }
// OnIpldPut returns the gas used for storing an object // OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).WithExtra(dataSize) return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).WithExtra(dataSize).WithVirtual(pl.ipldPutBase*8700+(pl.ipldPutPerByte*100), 0)
} }
// OnCreateActor returns the gas used for creating an actor // OnCreateActor returns the gas used for creating an actor
@ -144,13 +144,13 @@ func (pl *pricelistV0) OnHashing(dataSize int) GasCharge {
// OnComputeUnsealedSectorCid // OnComputeUnsealedSectorCid
func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge { func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus // TODO: this needs more cost tuning, check with @lotus
return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0) return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0).WithVirtual(pl.computeUnsealedSectorCidBase*24500, 0)
} }
// OnVerifySeal // OnVerifySeal
func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge { func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus // TODO: this needs more cost tuning, check with @lotus
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) return newGasCharge("OnVerifySeal", pl.verifySealBase, 0).WithVirtual(pl.verifySealBase*177500, 0)
} }
// OnVerifyPost // OnVerifyPost

View File

@ -253,7 +253,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
} }
rt.ChargeGas(rt.Pricelist().OnCreateActor()) rt.chargeGas(rt.Pricelist().OnCreateActor())
err = rt.state.SetActor(address, &types.Actor{ err = rt.state.SetActor(address, &types.Actor{
Code: codeID, Code: codeID,
@ -267,7 +267,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
} }
func (rt *Runtime) DeleteActor(addr address.Address) { func (rt *Runtime) DeleteActor(addr address.Address) {
rt.ChargeGas(rt.Pricelist().OnDeleteActor()) rt.chargeGas(rt.Pricelist().OnDeleteActor())
act, err := rt.state.GetActor(rt.Message().Receiver()) act, err := rt.state.GetActor(rt.Message().Receiver())
if err != nil { if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
@ -408,7 +408,7 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
if subrt != nil { if subrt != nil {
rt.numActorsCreated = subrt.numActorsCreated rt.numActorsCreated = subrt.numActorsCreated
} }
rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace) //&er) rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
return ret, errSend return ret, errSend
} }
@ -496,7 +496,15 @@ func (rt *Runtime) finilizeGasTracing() {
} }
} }
func (rt *Runtime) ChargeGas(gas GasCharge) { // ChargeGas is the gas charging function exposed to spec-actors
func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) {
err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1)
if err != nil {
panic(err)
}
}
func (rt *Runtime) chargeGas(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1) err := rt.chargeGasInternal(gas, 1)
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -241,10 +241,12 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
return sigs.Verify(&sig, kaddr, input) return sigs.Verify(&sig, kaddr, input)
} }
var BatchSealVerifyParallelism = goruntime.NumCPU()
func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) { func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool) out := make(map[address.Address][]bool)
sema := make(chan struct{}, goruntime.NumCPU()) sema := make(chan struct{}, BatchSealVerifyParallelism)
var wg sync.WaitGroup var wg sync.WaitGroup
for addr, seals := range inp { for addr, seals := range inp {

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
@ -391,6 +392,10 @@ var clientRetrieveCmd = &cli.Command{
Name: "car", Name: "car",
Usage: "export to a car file instead of a regular file", Usage: "export to a car file instead of a regular file",
}, },
&cli.StringFlag{
Name: "miner",
Usage: "miner address for retrieval, if not present it'll use local discovery",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 { if cctx.NArg() != 2 {
@ -398,7 +403,7 @@ var clientRetrieveCmd = &cli.Command{
return nil return nil
} }
api, closer, err := GetFullNodeAPI(cctx) fapi, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
} }
@ -409,7 +414,7 @@ var clientRetrieveCmd = &cli.Command{
if cctx.String("address") != "" { if cctx.String("address") != "" {
payer, err = address.NewFromString(cctx.String("address")) payer, err = address.NewFromString(cctx.String("address"))
} else { } else {
payer, err = api.WalletDefaultAddress(ctx) payer, err = fapi.WalletDefaultAddress(ctx)
} }
if err != nil { if err != nil {
return err return err
@ -432,23 +437,39 @@ var clientRetrieveCmd = &cli.Command{
return nil return nil
}*/ // TODO: fix }*/ // TODO: fix
offers, err := api.ClientFindData(ctx, file) var offer api.QueryOffer
minerStrAddr := cctx.String("miner")
if minerStrAddr == "" { // Local discovery
offers, err := fapi.ClientFindData(ctx, file)
if err != nil { if err != nil {
return err return err
} }
// TODO: parse offer strings from `client find`, make this smarter // TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 { if len(offers) < 1 {
fmt.Println("Failed to find file") fmt.Println("Failed to find file")
return nil return nil
} }
offer = offers[0]
} else { // Directed retrieval
minerAddr, err := address.NewFromString(minerStrAddr)
if err != nil {
return err
}
offer, err = fapi.ClientMinerQueryOffer(ctx, file, minerAddr)
if err != nil {
return err
}
}
if offer.Err != "" {
return fmt.Errorf("The received offer errored: %s", offer.Err)
}
ref := &lapi.FileRef{ ref := &lapi.FileRef{
Path: cctx.Args().Get(1), Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"), IsCAR: cctx.Bool("car"),
} }
if err := api.ClientRetrieve(ctx, offers[0].Order(payer), ref); err != nil { if err := fapi.ClientRetrieve(ctx, offer.Order(payer), ref); err != nil {
return xerrors.Errorf("Retrieval Failed: %w", err) return xerrors.Errorf("Retrieval Failed: %w", err)
} }

View File

@ -4,8 +4,11 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"math"
"os" "os"
"runtime"
"runtime/pprof" "runtime/pprof"
"sort" "sort"
"time" "time"
@ -44,8 +47,14 @@ var importBenchCmd = &cli.Command{
Name: "height", Name: "height",
Usage: "halt validation after given height", Usage: "halt validation after given height",
}, },
&cli.IntFlag{
Name: "batch-seal-verify-threads",
Usage: "set the parallelism factor for batch seal verification",
Value: runtime.NumCPU(),
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
if !cctx.Args().Present() { if !cctx.Args().Present() {
fmt.Println("must pass car file of chain to benchmark importing") fmt.Println("must pass car file of chain to benchmark importing")
return nil return nil
@ -111,14 +120,22 @@ var importBenchCmd = &cli.Command{
ts = next ts = next
} }
out := make([]TipSetExec, 0, len(tschain)) ibj, err := os.Create("import-bench.json")
if err != nil {
return err
}
defer ibj.Close() //nolint:errcheck
enc := json.NewEncoder(ibj)
var lastTse *TipSetExec
lastState := tschain[len(tschain)-1].ParentState() lastState := tschain[len(tschain)-1].ParentState()
for i := len(tschain) - 2; i >= 0; i-- { for i := len(tschain) - 2; i >= 0; i-- {
cur := tschain[i] cur := tschain[i]
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
if cur.ParentState() != lastState { if cur.ParentState() != lastState {
lastTrace := out[len(out)-1].Trace lastTrace := lastTse.Trace
d, err := json.MarshalIndent(lastTrace, "", " ") d, err := json.MarshalIndent(lastTrace, "", " ")
if err != nil { if err != nil {
panic(err) panic(err)
@ -132,36 +149,98 @@ var importBenchCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
out = append(out, TipSetExec{ stripCallers(trace)
lastTse = &TipSetExec{
TipSet: cur.Key(), TipSet: cur.Key(),
Trace: trace, Trace: trace,
Duration: time.Since(start), Duration: time.Since(start),
}) }
lastState = st lastState = st
if err := enc.Encode(lastTse); err != nil {
return xerrors.Errorf("failed to write out tipsetexec: %w", err)
}
} }
pprof.StopCPUProfile() pprof.StopCPUProfile()
ibj, err := os.Create("import-bench.json")
if err != nil {
return err
}
defer ibj.Close() //nolint:errcheck
if err := json.NewEncoder(ibj).Encode(out); err != nil {
return err
}
return nil return nil
}, },
} }
func walkExecutionTrace(et *types.ExecutionTrace) {
for _, gc := range et.GasCharges {
gc.Callers = nil
}
for _, sub := range et.Subcalls {
walkExecutionTrace(&sub) //nolint:scopelint,gosec
}
}
func stripCallers(trace []*api.InvocResult) {
for _, t := range trace {
walkExecutionTrace(&t.ExecutionTrace)
}
}
type Invocation struct { type Invocation struct {
TipSet types.TipSetKey TipSet types.TipSetKey
Invoc *api.InvocResult Invoc *api.InvocResult
} }
const GasPerNs = 10
func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
var cgas, vgas int64
for _, gc := range et.GasCharges {
cgas += gc.ComputeGas
vgas += gc.VirtualComputeGas
}
for _, sub := range et.Subcalls {
c, v := countGasCosts(&sub)
cgas += c
vgas += v
}
return cgas, vgas
}
func compStats(vals []float64) (float64, float64) {
var sum float64
for _, v := range vals {
sum += v
}
av := sum / float64(len(vals))
var varsum float64
for _, v := range vals {
delta := av - v
varsum += delta * delta
}
return av, math.Sqrt(varsum / float64(len(vals)))
}
func tallyGasCharges(charges map[string][]float64, et *types.ExecutionTrace) {
for _, gc := range et.GasCharges {
compGas := gc.ComputeGas + gc.VirtualComputeGas
ratio := float64(compGas) / float64(gc.TimeTaken.Nanoseconds())
charges[gc.Name] = append(charges[gc.Name], 1/(ratio/GasPerNs))
//fmt.Printf("%s: %d, %s: %0.2f\n", gc.Name, compGas, gc.TimeTaken, 1/(ratio/GasPerNs))
}
// Recurse into subcalls once per trace, not once per gas charge.
for _, sub := range et.Subcalls {
tallyGasCharges(charges, &sub)
}
}
var importAnalyzeCmd = &cli.Command{ var importAnalyzeCmd = &cli.Command{
Name: "analyze", Name: "analyze",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
@ -176,9 +255,18 @@ var importAnalyzeCmd = &cli.Command{
} }
var results []TipSetExec var results []TipSetExec
if err := json.NewDecoder(fi).Decode(&results); err != nil { for {
var tse TipSetExec
if err := json.NewDecoder(fi).Decode(&tse); err != nil {
if err != io.EOF {
return err return err
} }
break
}
results = append(results, tse)
}
chargeDeltas := make(map[string][]float64)
var invocs []Invocation var invocs []Invocation
var totalTime time.Duration var totalTime time.Duration
@ -191,9 +279,29 @@ var importAnalyzeCmd = &cli.Command{
TipSet: r.TipSet, TipSet: r.TipSet,
Invoc: inv, Invoc: inv,
}) })
cgas, vgas := countGasCosts(&inv.ExecutionTrace)
fmt.Printf("Invocation: %d %s: %s %d -> %0.2f\n", inv.Msg.Method, inv.Msg.To, inv.Duration, cgas+vgas, float64(GasPerNs*inv.Duration.Nanoseconds())/float64(cgas+vgas))
tallyGasCharges(chargeDeltas, &inv.ExecutionTrace)
} }
} }
var keys []string
for k := range chargeDeltas {
keys = append(keys, k)
}
fmt.Println("Gas Price Deltas")
sort.Strings(keys)
for _, k := range keys {
vals := chargeDeltas[k]
av, stdev := compStats(vals)
fmt.Printf("%s: incr by %f (%f)\n", k, av, stdev)
}
sort.Slice(invocs, func(i, j int) bool { sort.Slice(invocs, func(i, j int) bool {
return invocs[i].Invoc.Duration > invocs[j].Invoc.Duration return invocs[i].Invoc.Duration > invocs[j].Invoc.Duration
}) })

View File

@ -139,6 +139,10 @@ var sealBenchCmd = &cli.Command{
Name: "num-sectors", Name: "num-sectors",
Value: 1, Value: 1,
}, },
&cli.IntFlag{
Name: "parallel",
Value: 1,
},
}, },
Action: func(c *cli.Context) error { Action: func(c *cli.Context) error {
if c.Bool("no-gpu") { if c.Bool("no-gpu") {
@ -235,7 +239,12 @@ var sealBenchCmd = &cli.Command{
if robench == "" { if robench == "" {
var err error var err error
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal")) parCfg := ParCfg{
PreCommit1: c.Int("parallel"),
PreCommit2: 1,
Commit: 1,
}
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal"))
if err != nil { if err != nil {
return xerrors.Errorf("failed to run seals: %w", err) return xerrors.Errorf("failed to run seals: %w", err)
} }
@ -307,7 +316,7 @@ var sealBenchCmd = &cli.Command{
return err return err
} }
winnnigpost1 := time.Now() winningpost1 := time.Now()
log.Info("computing winning post snark (hot)") log.Info("computing winning post snark (hot)")
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:]) proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
@ -331,7 +340,7 @@ var sealBenchCmd = &cli.Command{
log.Error("post verification failed") log.Error("post verification failed")
} }
verifyWinnnigPost1 := time.Now() verifyWinningPost1 := time.Now()
pvi2 := abi.WinningPoStVerifyInfo{ pvi2 := abi.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]), Randomness: abi.PoStRandomness(challenge[:]),
@ -398,10 +407,10 @@ var sealBenchCmd = &cli.Command{
verifyWindowpost2 := time.Now() verifyWindowpost2 := time.Now()
bo.PostGenerateCandidates = gencandidates.Sub(beforePost) bo.PostGenerateCandidates = gencandidates.Sub(beforePost)
bo.PostWinningProofCold = winnnigpost1.Sub(gencandidates) bo.PostWinningProofCold = winningpost1.Sub(gencandidates)
bo.PostWinningProofHot = winnningpost2.Sub(winnnigpost1) bo.PostWinningProofHot = winnningpost2.Sub(winningpost1)
bo.VerifyWinningPostCold = verifyWinnnigPost1.Sub(winnningpost2) bo.VerifyWinningPostCold = verifyWinningPost1.Sub(winnningpost2)
bo.VerifyWinningPostHot = verifyWinningPost2.Sub(verifyWinnnigPost1) bo.VerifyWinningPostHot = verifyWinningPost2.Sub(verifyWinningPost1)
bo.PostWindowProofCold = windowpost1.Sub(verifyWinningPost2) bo.PostWindowProofCold = windowpost1.Sub(verifyWinningPost2)
bo.PostWindowProofHot = windowpost2.Sub(windowpost1) bo.PostWindowProofHot = windowpost2.Sub(windowpost1)
@ -432,10 +441,10 @@ var sealBenchCmd = &cli.Command{
} }
if !c.Bool("skip-commit2") { if !c.Bool("skip-commit2") {
fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates)) fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates))
fmt.Printf("compute winnnig post proof (cold): %s\n", bo.PostWinningProofCold) fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winnnig post proof (hot): %s\n", bo.PostWinningProofHot) fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winnnig post proof (cold): %s\n", bo.VerifyWinningPostCold) fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold)
fmt.Printf("verify winnnig post proof (hot): %s\n\n", bo.VerifyWinningPostHot) fmt.Printf("verify winning post proof (hot): %s\n\n", bo.VerifyWinningPostHot)
fmt.Printf("compute window post proof (cold): %s\n", bo.PostWindowProofCold) fmt.Printf("compute window post proof (cold): %s\n", bo.PostWindowProofCold)
fmt.Printf("compute window post proof (hot): %s\n", bo.PostWindowProofHot) fmt.Printf("compute window post proof (hot): %s\n", bo.PostWindowProofHot)
@ -447,9 +456,23 @@ var sealBenchCmd = &cli.Command{
}, },
} }
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []abi.SectorInfo, error) { type ParCfg struct {
var sealTimings []SealingResult PreCommit1 int
var sealedSectors []abi.SectorInfo PreCommit2 int
Commit int
}
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []abi.SectorInfo, error) {
var pieces []abi.PieceInfo
sealTimings := make([]SealingResult, numSectors)
sealedSectors := make([]abi.SectorInfo, numSectors)
preCommit2Sema := make(chan struct{}, par.PreCommit2)
commitSema := make(chan struct{}, par.Commit)
if numSectors%par.PreCommit1 != 0 {
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
}
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ { for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
sid := abi.SectorID{ sid := abi.SectorID{
@ -458,7 +481,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
} }
start := time.Now() start := time.Now()
log.Info("Writing piece into sector...") log.Infof("[%d] Writing piece into sector...", i)
r := rand.New(rand.NewSource(100 + int64(i))) r := rand.New(rand.NewSource(100 + int64(i)))
@ -467,48 +490,73 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
return nil, nil, err return nil, nil, err
} }
addpiece := time.Now() pieces = append(pieces, pi)
sealTimings[i-1].AddPiece = time.Since(start)
}
sectorsPerWorker := numSectors / par.PreCommit1
errs := make(chan error, par.PreCommit1)
for wid := 0; wid < par.PreCommit1; wid++ {
go func(worker int) {
sealerr := func() error {
start := 1 + (worker * sectorsPerWorker)
end := start + sectorsPerWorker
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
ix := int(i - 1)
sid := abi.SectorID{
Miner: mid,
Number: i,
}
start := time.Now()
trand := blake2b.Sum256(ticketPreimage) trand := blake2b.Sum256(ticketPreimage)
ticket := abi.SealRandomness(trand[:]) ticket := abi.SealRandomness(trand[:])
log.Info("Running replication(1)...") log.Infof("[%d] Running replication(1)...", i)
pieces := []abi.PieceInfo{pi} pieces := []abi.PieceInfo{pieces[ix]}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces) pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("commit: %w", err) return xerrors.Errorf("commit: %w", err)
} }
precommit1 := time.Now() precommit1 := time.Now()
log.Info("Running replication(2)...") preCommit2Sema <- struct{}{}
pc2Start := time.Now()
log.Infof("[%d] Running replication(2)...", i)
cids, err := sb.SealPreCommit2(context.TODO(), sid, pc1o) cids, err := sb.SealPreCommit2(context.TODO(), sid, pc1o)
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("commit: %w", err) return xerrors.Errorf("commit: %w", err)
} }
precommit2 := time.Now() precommit2 := time.Now()
<-preCommit2Sema
sealedSectors = append(sealedSectors, abi.SectorInfo{ sealedSectors[ix] = abi.SectorInfo{
SealProof: sb.SealProofType(), SealProof: sb.SealProofType(),
SectorNumber: i, SectorNumber: i,
SealedCID: cids.Sealed, SealedCID: cids.Sealed,
}) }
seed := lapi.SealSeed{ seed := lapi.SealSeed{
Epoch: 101, Epoch: 101,
Value: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}, Value: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255},
} }
log.Info("Generating PoRep for sector (1)") commitSema <- struct{}{}
commitStart := time.Now()
log.Infof("[%d] Generating PoRep for sector (1)", i)
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids) c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
if err != nil { if err != nil {
return nil, nil, err return err
} }
sealcommit1 := time.Now() sealcommit1 := time.Now()
log.Info("Generating PoRep for sector (2)") log.Infof("[%d] Generating PoRep for sector (2)", i)
if saveC2inp != "" { if saveC2inp != "" {
c2in := Commit2In{ c2in := Commit2In{
@ -519,7 +567,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
b, err := json.Marshal(&c2in) b, err := json.Marshal(&c2in)
if err != nil { if err != nil {
return nil, nil, err return err
} }
if err := ioutil.WriteFile(saveC2inp, b, 0664); err != nil { if err := ioutil.WriteFile(saveC2inp, b, 0664); err != nil {
@ -531,11 +579,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
if !skipc2 { if !skipc2 {
proof, err = sb.SealCommit2(context.TODO(), sid, c1o) proof, err = sb.SealCommit2(context.TODO(), sid, c1o)
if err != nil { if err != nil {
return nil, nil, err return err
} }
} }
sealcommit2 := time.Now() sealcommit2 := time.Now()
<-commitSema
if !skipc2 { if !skipc2 {
svi := abi.SealVerifyInfo{ svi := abi.SealVerifyInfo{
@ -551,45 +600,58 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
ok, err := ffiwrapper.ProofVerifier.VerifySeal(svi) ok, err := ffiwrapper.ProofVerifier.VerifySeal(svi)
if err != nil { if err != nil {
return nil, nil, err return err
} }
if !ok { if !ok {
return nil, nil, xerrors.Errorf("porep proof for sector %d was invalid", i) return xerrors.Errorf("porep proof for sector %d was invalid", i)
} }
} }
verifySeal := time.Now() verifySeal := time.Now()
if !skipunseal { if !skipunseal {
log.Info("Unsealing sector") log.Infof("[%d] Unsealing sector", i)
{ {
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, stores.FTUnsealed, stores.FTNone, true) p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, stores.FTUnsealed, stores.FTNone, true)
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("acquire unsealed sector for removing: %w", err) return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
} }
done() done()
if err := os.Remove(p.Unsealed); err != nil { if err := os.Remove(p.Unsealed); err != nil {
return nil, nil, xerrors.Errorf("removing unsealed sector: %w", err) return xerrors.Errorf("removing unsealed sector: %w", err)
} }
} }
err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed) err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
if err != nil { if err != nil {
return nil, nil, err return err
} }
} }
unseal := time.Now() unseal := time.Now()
sealTimings = append(sealTimings, SealingResult{ sealTimings[ix].PreCommit1 = precommit1.Sub(start)
AddPiece: addpiece.Sub(start), sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start)
PreCommit1: precommit1.Sub(addpiece), sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart)
PreCommit2: precommit2.Sub(precommit1), sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1)
Commit1: sealcommit1.Sub(precommit2), sealTimings[ix].Verify = verifySeal.Sub(sealcommit2)
Commit2: sealcommit2.Sub(sealcommit1), sealTimings[ix].Unseal = unseal.Sub(verifySeal)
Verify: verifySeal.Sub(sealcommit2), }
Unseal: unseal.Sub(verifySeal), return nil
}) }()
if sealerr != nil {
errs <- sealerr
return
}
errs <- nil
}(wid)
}
for i := 0; i < par.PreCommit1; i++ {
err := <-errs
if err != nil {
return nil, nil, err
}
} }
return sealTimings, sealedSectors, nil return sealTimings, sealedSectors, nil

View File

@ -118,7 +118,7 @@ create unique index if not exists block_cid_uindex
create materialized view if not exists state_heights create materialized view if not exists state_heights
as select distinct height, parentstateroot from blocks; as select distinct height, parentstateroot from blocks;
create unique index if not exists state_heights_uindex create index if not exists state_heights_index
on state_heights (height); on state_heights (height);
create index if not exists state_heights_height_index create index if not exists state_heights_height_index

View File

@ -53,6 +53,7 @@ type minerKey struct {
addr address.Address addr address.Address
act types.Actor act types.Actor
stateroot cid.Cid stateroot cid.Cid
tsKey types.TipSetKey
} }
type minerInfo struct { type minerInfo struct {
@ -66,10 +67,11 @@ type minerInfo struct {
type actorInfo struct { type actorInfo struct {
stateroot cid.Cid stateroot cid.Cid
tsKey types.TipSetKey
state string state string
} }
func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipSet, maxBatch int) { func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.TipSet, maxBatch int) {
var alk sync.Mutex var alk sync.Mutex
log.Infof("Getting synced block list") log.Infof("Getting synced block list")
@ -81,7 +83,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
allToSync := map[cid.Cid]*types.BlockHeader{} allToSync := map[cid.Cid]*types.BlockHeader{}
toVisit := list.New() toVisit := list.New()
for _, header := range ts.Blocks() { for _, header := range headTs.Blocks() {
toVisit.PushBack(header) toVisit.PushBack(header)
} }
@ -116,7 +118,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
for len(allToSync) > 0 { for len(allToSync) > 0 {
actors := map[address.Address]map[types.Actor]actorInfo{} actors := map[address.Address]map[types.Actor]actorInfo{}
addresses := map[address.Address]address.Address{} addressToID := map[address.Address]address.Address{}
minH := abi.ChainEpoch(math.MaxInt64) minH := abi.ChainEpoch(math.MaxInt64)
for _, header := range allToSync { for _, header := range allToSync {
@ -129,7 +131,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
for c, header := range allToSync { for c, header := range allToSync {
if header.Height < minH+abi.ChainEpoch(maxBatch) { if header.Height < minH+abi.ChainEpoch(maxBatch) {
toSync[c] = header toSync[c] = header
addresses[header.Miner] = address.Undef addressToID[header.Miner] = address.Undef
} }
} }
for c := range toSync { for c := range toSync {
@ -146,20 +148,20 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
} }
if len(bh.Parents) == 0 { // genesis case if len(bh.Parents) == 0 { // genesis case
ts, _ := types.NewTipSet([]*types.BlockHeader{bh}) genesisTs, _ := types.NewTipSet([]*types.BlockHeader{bh})
aadrs, err := api.StateListActors(ctx, ts.Key()) aadrs, err := api.StateListActors(ctx, genesisTs.Key())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
} }
parmap.Par(50, aadrs, func(addr address.Address) { parmap.Par(50, aadrs, func(addr address.Address) {
act, err := api.StateGetActor(ctx, addr, ts.Key()) act, err := api.StateGetActor(ctx, addr, genesisTs.Key())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
} }
ast, err := api.StateReadState(ctx, act, ts.Key()) ast, err := api.StateReadState(ctx, act, genesisTs.Key())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
@ -177,9 +179,10 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
} }
actors[addr][*act] = actorInfo{ actors[addr][*act] = actorInfo{
stateroot: bh.ParentStateRoot, stateroot: bh.ParentStateRoot,
tsKey: genesisTs.Key(),
state: string(state), state: string(state),
} }
addresses[addr] = address.Undef addressToID[addr] = address.Undef
alk.Unlock() alk.Unlock()
}) })
@ -206,11 +209,13 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Error(err) log.Error(err)
return return
} }
ast, err := api.StateReadState(ctx, &act, pts.Key()) ast, err := api.StateReadState(ctx, &act, pts.Key())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
} }
state, err := json.Marshal(ast.State) state, err := json.Marshal(ast.State)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
@ -225,8 +230,9 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
actors[addr][act] = actorInfo{ actors[addr][act] = actorInfo{
stateroot: bh.ParentStateRoot, stateroot: bh.ParentStateRoot,
state: string(state), state: string(state),
tsKey: pts.Key(),
} }
addresses[addr] = address.Undef addressToID[addr] = address.Undef
alk.Unlock() alk.Unlock()
} }
}) })
@ -238,18 +244,20 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Infof("Resolving addresses") log.Infof("Resolving addresses")
for _, message := range msgs { for _, message := range msgs {
addresses[message.To] = address.Undef addressToID[message.To] = address.Undef
addresses[message.From] = address.Undef addressToID[message.From] = address.Undef
} }
parmap.Par(50, parmap.KMapArr(addresses), func(addr address.Address) { parmap.Par(50, parmap.KMapArr(addressToID), func(addr address.Address) {
// FIXME: cannot use EmptyTSK here since actor IDs can change during reorgs; need to use the corresponding tipset.
// TODO: figure out a way to get the corresponding tipset...
raddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK) raddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK)
if err != nil { if err != nil {
log.Warn(err) log.Warn(err)
return return
} }
alk.Lock() alk.Lock()
addresses[addr] = raddr addressToID[addr] = raddr
alk.Unlock() alk.Unlock()
}) })
@ -267,6 +275,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
addr: addr, addr: addr,
act: actor, act: actor,
stateroot: c.stateroot, stateroot: c.stateroot,
tsKey: c.tsKey,
}] = &minerInfo{} }] = &minerInfo{}
} }
} }
@ -274,14 +283,17 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
parmap.Par(50, parmap.KVMapArr(miners), func(it func() (minerKey, *minerInfo)) { parmap.Par(50, parmap.KVMapArr(miners), func(it func() (minerKey, *minerInfo)) {
k, info := it() k, info := it()
pow, err := api.StateMinerPower(ctx, k.addr, types.EmptyTSK) // TODO: get the storage power actor's state and pull the miner power from there; currently this hits the
// storage power actor once for each miner for each tipset, we can do better by just getting it for each tipset
// and reading each miner power from the result.
pow, err := api.StateMinerPower(ctx, k.addr, k.tsKey)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
// Not sure why this would fail, but it's probably worth continuing // Not sure why this would fail, but it's probably worth continuing
} }
info.power = pow.MinerPower.QualityAdjPower info.power = pow.MinerPower.QualityAdjPower
sszs, err := api.StateMinerSectorCount(ctx, k.addr, types.EmptyTSK) sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
@ -316,7 +328,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Info("Storing address mapping") log.Info("Storing address mapping")
if err := st.storeAddressMap(addresses); err != nil { if err := st.storeAddressMap(addressToID); err != nil {
log.Error(err) log.Error(err)
return return
} }
@ -361,7 +373,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Infof("Get deals") log.Infof("Get deals")
// TODO: incremental, gather expired // TODO: incremental, gather expired
deals, err := api.StateMarketDeals(ctx, ts.Key()) deals, err := api.StateMarketDeals(ctx, headTs.Key())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return

View File

@ -3,11 +3,14 @@ package main
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net" "net"
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
"syscall"
"time"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/gorilla/mux" "github.com/gorilla/mux"
@ -117,11 +120,19 @@ var runCmd = &cli.Command{
} }
// Connect to storage-miner // Connect to storage-miner
var nodeApi api.StorageMiner
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) var closer func()
if err != nil { var err error
return xerrors.Errorf("getting miner api: %w", err) for {
nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx)
if err == nil {
break
} }
fmt.Printf("\r\x1b[0KConnecting to miner API... (%s)", err)
time.Sleep(time.Second)
continue
}
defer closer() defer closer()
ctx := lcli.ReqContext(cctx) ctx := lcli.ReqContext(cctx)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
@ -136,6 +147,8 @@ var runCmd = &cli.Command{
} }
log.Infof("Remote version %s", v) log.Infof("Remote version %s", v)
watchMinerConn(ctx, cctx, nodeApi)
// Check params // Check params
act, err := nodeApi.ActorAddress(ctx) act, err := nodeApi.ActorAddress(ctx)
@ -317,3 +330,42 @@ var runCmd = &cli.Command{
return srv.Serve(nl) return srv.Serve(nl)
}, },
} }
func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageMiner) {
go func() {
closing, err := nodeApi.Closing(ctx)
if err != nil {
log.Errorf("failed to get remote closing channel: %+v", err)
}
select {
case <-closing:
case <-ctx.Done():
}
if ctx.Err() != nil {
return // graceful shutdown
}
log.Warnf("Connection with miner node lost, restarting")
exe, err := os.Executable()
if err != nil {
log.Errorf("getting executable for auto-restart: %+v", err)
}
log.Sync()
// TODO: there are probably cleaner/more graceful ways to restart,
// but this is good enough for now (FSM can recover from the mess this creates)
if err := syscall.Exec(exe, []string{exe, "run",
fmt.Sprintf("--address=%s", cctx.String("address")),
fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")),
fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")),
fmt.Sprintf("--precommit2=%t", cctx.Bool("precommit2")),
fmt.Sprintf("--commit=%t", cctx.Bool("commit")),
}, os.Environ()); err != nil {
fmt.Println(err)
}
}()
}

View File

@ -88,7 +88,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
return nil, nil, xerrors.Errorf("commit: %w", err) return nil, nil, xerrors.Errorf("commit: %w", err)
} }
if err := sb.FinalizeSector(context.TODO(), sid); err != nil { if err := sb.FinalizeSector(context.TODO(), sid, nil); err != nil {
return nil, nil, xerrors.Errorf("trim cache: %w", err) return nil, nil, xerrors.Errorf("trim cache: %w", err)
} }

View File

@ -0,0 +1,94 @@
package main
import (
"fmt"
ma "github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
var actorCmd = &cli.Command{
Name: "actor",
Usage: "manipulate the miner actor",
Subcommands: []*cli.Command{
actorSetAddrsCmd,
},
}
var actorSetAddrsCmd = &cli.Command{
Name: "set-addrs",
Usage: "set addresses that your miner can be publically dialed on",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "gas-limit",
Usage: "set gas limit",
Value: 100000,
},
},
Action: func(cctx *cli.Context) error {
nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
var addrs []abi.Multiaddrs
for _, a := range cctx.Args().Slice() {
maddr, err := ma.NewMultiaddr(a)
if err != nil {
return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err)
}
addrs = append(addrs, maddr.Bytes())
}
maddr, err := nodeAPI.ActorAddress(ctx)
if err != nil {
return err
}
minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
if err != nil {
return err
}
gasLimit := cctx.Int64("gas-limit")
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
To: maddr,
From: minfo.Worker,
Value: types.NewInt(0),
GasPrice: types.NewInt(1),
GasLimit: gasLimit,
Method: 18,
Params: params,
})
if err != nil {
return err
}
fmt.Printf("Requested multiaddrs change in message %s\n", smsg.Cid())
return nil
},
}
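As a hedged usage sketch (the binary name and multiaddr are illustrative assumptions, not taken from this change), the new subcommand would be invoked roughly as lotus-storage-miner actor set-addrs /ip4/203.0.113.7/tcp/24001; the multiaddrs given on the command line are serialized into ChangeMultiaddrsParams and pushed as a message from the miner's worker address.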

View File

@ -12,6 +12,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
sealing "github.com/filecoin-project/storage-fsm" sealing "github.com/filecoin-project/storage-fsm"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
@ -119,6 +120,9 @@ var infoCmd = &cli.Command{
faultyPercentage) faultyPercentage)
} }
if pow.MinerPower.RawBytePower.LessThan(power.ConsensusMinerMinPower) {
fmt.Print("Below minimum power threshold, no blocks will be won")
} else {
expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000 expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000
if expWinChance > 0 { if expWinChance > 0 {
if expWinChance > 1 { if expWinChance > 1 {
@ -130,6 +134,7 @@ var infoCmd = &cli.Command{
fmt.Print("Expected block win rate: ") fmt.Print("Expected block win rate: ")
color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second)) color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second))
} }
}
fmt.Println() fmt.Println()

View File

@ -460,7 +460,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
} }
if cerr != nil { if cerr != nil {
return xerrors.Errorf("failed to configure storage miner: %w", err) return xerrors.Errorf("failed to configure storage miner: %w", cerr)
} }
} }

View File

@ -22,7 +22,9 @@ func main() {
lotuslog.SetupLogLevels() lotuslog.SetupLogLevels()
local := []*cli.Command{ local := []*cli.Command{
dealsCmd, actorCmd,
storageDealsCmd,
retrievalDealsCmd,
infoCmd, infoCmd,
initCmd, initCmd,
rewardsCmd, rewardsCmd,
@ -30,7 +32,6 @@ func main() {
stopCmd, stopCmd,
sectorsCmd, sectorsCmd,
storageCmd, storageCmd,
setPriceCmd,
workersCmd, workersCmd,
provingCmd, provingCmd,
} }

View File

@ -1,15 +1,55 @@
package main package main
import ( import (
"bufio"
"encoding/json" "encoding/json"
"fmt" "fmt"
"os"
"path/filepath"
"text/tabwriter"
"time"
"github.com/docker/go-units"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
) )
var CidBaseFlag = cli.StringFlag{
Name: "cid-base",
Hidden: true,
Value: "base32",
Usage: "Multibase encoding used for version 1 CIDs in output.",
DefaultText: "base32",
}
// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or
// the default (Base32) encoder if not.
func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
val := cctx.String("cid-base")
e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)}
if val != "" {
var err error
e.Base, err = multibase.EncoderByName(val)
if err != nil {
return e, err
}
}
return e, nil
}
var enableCmd = &cli.Command{ var enableCmd = &cli.Command{
Name: "enable", Name: "enable",
Usage: "Configure the miner to consider storage deal proposals", Usage: "Configure the miner to consider storage deal proposals",
@ -40,40 +80,156 @@ var disableCmd = &cli.Command{
}, },
} }
var setPriceCmd = &cli.Command{ var setAskCmd = &cli.Command{
Name: "set-price", Name: "set-ask",
Usage: "Set price that miner will accept storage deals at (FIL / GiB / Epoch)", Usage: "Configure the miner's ask",
Flags: []cli.Flag{}, Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "price",
Usage: "Set the price of the ask (specified as FIL / GiB / Epoch) to `PRICE`",
Required: true,
},
&cli.StringFlag{
Name: "duration",
Usage: "Set duration of ask (a quantity of time after which the ask expires) `DURATION`",
DefaultText: "720h0m0s",
Value: "720h0m0s",
},
&cli.StringFlag{
Name: "min-piece-size",
Usage: "Set minimum piece size (w/bit-padding, in bytes) in ask to `SIZE`",
DefaultText: "256B",
Value: "256B",
},
&cli.StringFlag{
Name: "max-piece-size",
Usage: "Set maximum piece size (w/bit-padding, in bytes) in ask to `SIZE`",
DefaultText: "miner sector size",
},
},
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
ctx := lcli.DaemonContext(cctx)
api, closer, err := lcli.GetStorageMinerAPI(cctx) api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
return err return err
} }
defer closer() defer closer()
ctx := lcli.DaemonContext(cctx) pri := types.NewInt(cctx.Uint64("price"))
if !cctx.Args().Present() { dur, err := time.ParseDuration(cctx.String("duration"))
return fmt.Errorf("must specify price to set") if err != nil {
return xerrors.Errorf("cannot parse duration: %w", err)
} }
fp, err := types.ParseFIL(cctx.Args().First()) qty := dur.Seconds() / build.BlockDelay
min, err := units.RAMInBytes(cctx.String("min-piece-size"))
if err != nil {
return xerrors.Errorf("cannot parse min-piece-size to quantity of bytes: %w", err)
}
if min < 256 {
return xerrors.New("minimum piece size (w/bit-padding) is 256B")
}
max, err := units.RAMInBytes(cctx.String("max-piece-size"))
if err != nil {
return xerrors.Errorf("cannot parse max-piece-size to quantity of bytes: %w", err)
}
maddr, err := api.ActorAddress(ctx)
if err != nil { if err != nil {
return err return err
} }
return api.MarketSetPrice(ctx, types.BigInt(fp)) ssize, err := api.ActorSectorSize(ctx, maddr)
if err != nil {
return err
}
smax := int64(ssize)
if max == 0 {
max = smax
}
if max > smax {
return xerrors.Errorf("max piece size (w/bit-padding) %s cannot exceed miner sector size %s", types.SizeStr(types.NewInt(uint64(max))), types.SizeStr(types.NewInt(uint64(smax))))
}
return api.MarketSetAsk(ctx, pri, abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max))
}, },
} }
var dealsCmd = &cli.Command{ var getAskCmd = &cli.Command{
Name: "deals", Name: "get-ask",
Usage: "interact with your deals", Usage: "Print the miner's ask",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
ctx := lcli.DaemonContext(cctx)
fnapi, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
sask, err := smapi.MarketGetAsk(ctx)
if err != nil {
return err
}
var ask *storagemarket.StorageAsk
if sask != nil && sask.Ask != nil {
ask = sask.Ask
}
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
fmt.Fprintf(w, "Price per GiB / Epoch\tMin. Piece Size (w/bit-padding)\tMax. Piece Size (w/bit-padding)\tExpiry (Epoch)\tExpiry (Appx. Rem. Time)\tSeq. No.\n")
if ask == nil {
fmt.Fprintf(w, "<miner does not have an ask>\n")
return w.Flush()
}
head, err := fnapi.ChainHead(ctx)
if err != nil {
return err
}
dlt := ask.Expiry - head.Height()
rem := "<expired>"
if dlt > 0 {
rem = (time.Second * time.Duration(dlt*build.BlockDelay)).String()
}
fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\t%d\n", ask.Price, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo)
return w.Flush()
},
}
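For reference, a small hedged sketch of the epoch/duration arithmetic used by set-ask and get-ask above (the 25-second block delay is an illustrative assumption standing in for build.BlockDelay):

package main

import (
	"fmt"
	"time"
)

const blockDelay = 25 // seconds per epoch; illustrative stand-in for build.BlockDelay

func main() {
	// set-ask: turn an ask duration into a count of epochs.
	dur, err := time.ParseDuration("720h0m0s")
	if err != nil {
		panic(err)
	}
	epochs := int64(dur.Seconds() / blockDelay)
	fmt.Println("ask valid for", epochs, "epochs")

	// get-ask: turn remaining epochs back into approximate wall-clock time.
	remaining := int64(1000) // illustrative number of epochs until expiry
	fmt.Println("expires in about", time.Second*time.Duration(remaining*blockDelay))
}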
var storageDealsCmd = &cli.Command{
Name: "storage-deals",
Usage: "Manage storage deals and related configuration",
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
dealsImportDataCmd, dealsImportDataCmd,
dealsListCmd, dealsListCmd,
enableCmd, enableCmd,
disableCmd, disableCmd,
setAskCmd,
getAskCmd,
setBlocklistCmd,
getBlocklistCmd,
resetBlocklistCmd,
}, },
} }
@ -132,3 +288,96 @@ var dealsListCmd = &cli.Command{
return nil return nil
}, },
} }
var getBlocklistCmd = &cli.Command{
Name: "get-blocklist",
Usage: "List the contents of the storage miner's piece CID blocklist",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
blocklist, err := api.DealsPieceCidBlocklist(lcli.DaemonContext(cctx))
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
for idx := range blocklist {
fmt.Println(encoder.Encode(blocklist[idx]))
}
return nil
},
}
var setBlocklistCmd = &cli.Command{
Name: "set-blocklist",
Usage: "Set the storage miner's list of blocklisted piece CIDs",
ArgsUsage: "[<path-of-file-containing-newline-delimited-piece-CIDs> (optional, will read from stdin if omitted)]",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
scanner := bufio.NewScanner(os.Stdin)
if cctx.Args().Present() && cctx.Args().First() != "-" {
absPath, err := filepath.Abs(cctx.Args().First())
if err != nil {
return err
}
file, err := os.Open(absPath)
if err != nil {
log.Fatal(err)
}
defer file.Close() //nolint:errcheck
scanner = bufio.NewScanner(file)
}
var blocklist []cid.Cid
for scanner.Scan() {
decoded, err := cid.Decode(scanner.Text())
if err != nil {
return err
}
blocklist = append(blocklist, decoded)
}
err = scanner.Err()
if err != nil {
return err
}
return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), blocklist)
},
}
var resetBlocklistCmd = &cli.Command{
Name: "reset-blocklist",
Usage: "Remove all entries from the storage miner's piece CID blocklist",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), []cid.Cid{})
},
}

View File

@ -26,6 +26,75 @@ var provingCmd = &cli.Command{
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
provingInfoCmd, provingInfoCmd,
provingDeadlinesCmd, provingDeadlinesCmd,
provingFaultsCmd,
},
}
var provingFaultsCmd = &cli.Command{
Name: "faults",
Usage: "View the currently known proving faulty sectors information",
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return xerrors.Errorf("getting actor address: %w", err)
}
var mas miner.State
{
mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
rmas, err := api.ChainReadObj(ctx, mact.Head)
if err != nil {
return err
}
if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
return err
}
}
faults, err := mas.Faults.All(100000000000)
if err != nil {
return err
}
if len(faults) == 0 {
fmt.Println("no faulty sectors")
}
head, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
deadlines, err := api.StateMinerDeadlines(ctx, maddr, head.Key())
if err != nil {
return xerrors.Errorf("getting miner deadlines: %w", err)
}
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tsectors")
for deadline, sectors := range deadlines.Due {
intersectSectors, _ := bitfield.IntersectBitField(sectors, mas.Faults)
if intersectSectors != nil {
allSectors, _ := intersectSectors.All(100000000000)
for _, num := range allSectors {
_, _ = fmt.Fprintf(tw, "%d\t%d\n", deadline, num)
}
}
}
return tw.Flush()
}, },
} }

View File

@ -0,0 +1,45 @@
package main
import (
lcli "github.com/filecoin-project/lotus/cli"
"github.com/urfave/cli/v2"
)
var retrievalDealsCmd = &cli.Command{
Name: "retrieval-deals",
Usage: "Manage retrieval deals and related configuration",
Subcommands: []*cli.Command{
enableRetrievalCmd,
disableRetrievalCmd,
},
}
var enableRetrievalCmd = &cli.Command{
Name: "enable",
Usage: "Configure the miner to consider retrieval deal proposals",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), true)
},
}
var disableRetrievalCmd = &cli.Command{
Name: "disable",
Usage: "Configure the miner to reject all retrieval deal proposals",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), false)
},
}

View File

@ -27,6 +27,7 @@ var sectorsCmd = &cli.Command{
sectorsRefsCmd, sectorsRefsCmd,
sectorsUpdateCmd, sectorsUpdateCmd,
sectorsPledgeCmd, sectorsPledgeCmd,
sectorsRemoveCmd,
}, },
} }
@ -47,7 +48,8 @@ var sectorsPledgeCmd = &cli.Command{
var sectorsStatusCmd = &cli.Command{ var sectorsStatusCmd = &cli.Command{
Name: "status", Name: "status",
Usage: "Get the seal status of a sector by its ID", Usage: "Get the seal status of a sector by its number",
ArgsUsage: "<sectorNum>",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "log", Name: "log",
@ -63,7 +65,7 @@ var sectorsStatusCmd = &cli.Command{
ctx := lcli.ReqContext(cctx) ctx := lcli.ReqContext(cctx)
if !cctx.Args().Present() { if !cctx.Args().Present() {
return fmt.Errorf("must specify sector ID to get status of") return fmt.Errorf("must specify sector number to get status of")
} }
id, err := strconv.ParseUint(cctx.Args().First(), 10, 64) id, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
@ -208,6 +210,39 @@ var sectorsRefsCmd = &cli.Command{
}, },
} }
var sectorsRemoveCmd = &cli.Command{
Name: "remove",
Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)",
ArgsUsage: "<sectorNum>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "pass this flag if you know what you are doing",
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Bool("really-do-it") {
return xerrors.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Args().Len() != 1 {
return xerrors.Errorf("must pass sector number")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorRemove(ctx, abi.SectorNumber(id))
},
}
var sectorsUpdateCmd = &cli.Command{ var sectorsUpdateCmd = &cli.Command{
Name: "update-state", Name: "update-state",
Usage: "ADVANCED: manually update the state of a sector, this may aid in error recovery", Usage: "ADVANCED: manually update the state of a sector, this may aid in error recovery",
@ -228,12 +263,12 @@ var sectorsUpdateCmd = &cli.Command{
defer closer() defer closer()
ctx := lcli.ReqContext(cctx) ctx := lcli.ReqContext(cctx)
if cctx.Args().Len() < 2 { if cctx.Args().Len() < 2 {
return xerrors.Errorf("must pass sector ID and new state") return xerrors.Errorf("must pass sector number and new state")
} }
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil { if err != nil {
return xerrors.Errorf("could not parse sector ID: %w", err) return xerrors.Errorf("could not parse sector number: %w", err)
} }
return nodeApi.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1))) return nodeApi.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1)))

View File

@ -63,10 +63,6 @@
"title": "Proof-of-Spacetime(s)", "title": "Proof-of-Spacetime(s)",
"value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data." "value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data."
}, },
"lotus-testnet": {
"title": "Filecoin Testnet",
"value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value the official filecoin tokens will not be released until Mainnet launch."
},
"filecoin-testnet": { "filecoin-testnet": {
"title": "Filecoin Testnet", "title": "Filecoin Testnet",
"value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value the official filecoin tokens will not be released until Mainnet launch." "value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value the official filecoin tokens will not be released until Mainnet launch."

View File

@ -298,7 +298,7 @@ needed to properly set up the node's repo. We list the most salient ones here.
### Datastore ### Datastore
`Datastore` and `ChainBlockstore`: Data related to the node state is saved in the repo's `Datastore`, `Datastore` and `ChainBlockstore`: Data related to the node state is saved in the repo's `Datastore`,
an IPFS interface defined [here](github.com/ipfs/go-datastore/datastore.go). an IPFS interface defined [here](https://github.com/ipfs/go-datastore/blob/master/datastore.go).
Lotus creates this interface from a [Badger DB](https://github.com/dgraph-io/badger) in Lotus creates this interface from a [Badger DB](https://github.com/dgraph-io/badger) in
[`FsRepo`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go). [`FsRepo`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go).
Every piece of data is fundamentally a key-value pair in the `datastore` directory of the repo. Every piece of data is fundamentally a key-value pair in the `datastore` directory of the repo.
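For orientation, here is a minimal hedged sketch of that key-value model (it uses the in-memory MapDatastore from go-datastore v0.4.x, the version pinned in go.mod below, as a stand-in for the Badger-backed store that FsRepo actually opens; the key name is illustrative):

package main

import (
	"fmt"

	datastore "github.com/ipfs/go-datastore"
)

func main() {
	// Same Datastore interface that Lotus backs with Badger on disk,
	// but held in memory for this sketch.
	ds := datastore.NewMapDatastore()
	defer ds.Close() //nolint:errcheck

	key := datastore.NewKey("/metadata/example") // illustrative key
	if err := ds.Put(key, []byte("value")); err != nil { // v0.4.x API: no context argument
		panic(err)
	}

	val, err := ds.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s => %s\n", key, val)
}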

View File

@ -51,4 +51,4 @@ To get the number of cores for your GPU, you will need to check your cards sp
## Benchmarking ## Benchmarking
Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/testnet-staging/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**. Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**.

go.mod (24 lines changed)
View File

@ -29,10 +29,10 @@ require (
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca
github.com/filecoin-project/go-statestore v0.1.0 github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7 github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246
github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121
github.com/filecoin-project/specs-storage v0.1.0 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743 github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
github.com/go-kit/kit v0.10.0 github.com/go-kit/kit v0.10.0
github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-ole/go-ole v1.2.4 // indirect
@ -44,8 +44,8 @@ require (
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/ipfs/go-bitswap v0.2.8 github.com/ipfs/go-bitswap v0.2.8
github.com/ipfs/go-block-format v0.0.2 github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-blockservice v0.1.3 github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 github.com/ipfs/go-cid v0.0.6
github.com/ipfs/go-cidutil v0.0.2 github.com/ipfs/go-cidutil v0.0.2
github.com/ipfs/go-datastore v0.4.4 github.com/ipfs/go-datastore v0.4.4
github.com/ipfs/go-ds-badger2 v0.1.0 github.com/ipfs/go-ds-badger2 v0.1.0
@ -76,20 +76,20 @@ require (
github.com/kelseyhightower/envconfig v1.4.0 github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.2.0 github.com/lib/pq v1.2.0
github.com/libp2p/go-eventbus v0.2.1 github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.9.4 github.com/libp2p/go-libp2p v0.10.0
github.com/libp2p/go-libp2p-connmgr v0.2.4 github.com/libp2p/go-libp2p-connmgr v0.2.4
github.com/libp2p/go-libp2p-core v0.5.7 github.com/libp2p/go-libp2p-core v0.6.0
github.com/libp2p/go-libp2p-discovery v0.4.0 github.com/libp2p/go-libp2p-discovery v0.4.0
github.com/libp2p/go-libp2p-kad-dht v0.8.1 github.com/libp2p/go-libp2p-kad-dht v0.8.1
github.com/libp2p/go-libp2p-mplex v0.2.3 github.com/libp2p/go-libp2p-mplex v0.2.3
github.com/libp2p/go-libp2p-peer v0.2.0 github.com/libp2p/go-libp2p-peer v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.2.4 github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p-pubsub v0.3.2 github.com/libp2p/go-libp2p-pubsub v0.3.2
github.com/libp2p/go-libp2p-quic-transport v0.5.0 github.com/libp2p/go-libp2p-quic-transport v0.5.0
github.com/libp2p/go-libp2p-record v0.1.2 github.com/libp2p/go-libp2p-record v0.1.2
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-secio v0.2.2 github.com/libp2p/go-libp2p-secio v0.2.2
github.com/libp2p/go-libp2p-swarm v0.2.6 github.com/libp2p/go-libp2p-swarm v0.2.7
github.com/libp2p/go-libp2p-tls v0.1.3 github.com/libp2p/go-libp2p-tls v0.1.3
github.com/libp2p/go-libp2p-yamux v0.2.8 github.com/libp2p/go-libp2p-yamux v0.2.8
github.com/libp2p/go-maddr-filter v0.1.0 github.com/libp2p/go-maddr-filter v0.1.0
@ -100,11 +100,11 @@ require (
github.com/multiformats/go-multiaddr v0.2.2 github.com/multiformats/go-multiaddr v0.2.2
github.com/multiformats/go-multiaddr-dns v0.2.0 github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.5 github.com/multiformats/go-multiaddr-net v0.1.5
github.com/multiformats/go-multibase v0.0.2 github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.13 github.com/multiformats/go-multihash v0.0.13
github.com/opentracing/opentracing-go v1.1.0 github.com/opentracing/opentracing-go v1.1.0
github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.5.1 github.com/stretchr/testify v1.6.1
github.com/syndtr/goleveldb v1.0.0 github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.2.0 github.com/urfave/cli/v2 v2.2.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba

go.sum (48 lines changed)
View File

@ -62,7 +62,6 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -253,18 +252,19 @@ github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZO
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM=
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7 h1:cjsOpQKvZosPx9/qqq2bucHVdRyXzvBR1f37atiR3/0= github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246 h1:NfYQRmVRe0LzlNbK5Ket3vbBOwFD5TvtcNtfo/Sd8mg=
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246/go.mod h1:8f0hWDzzIi1hKs4IVKH9RnDsO4LEHVz8BNat0okDOuY=
github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA= github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0=
github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121 h1:oRA+b4iN4H86xXDXbU3TOyvmBZp7//c5VqTc0oJ6nLg=
github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94=
github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743 h1:a8f1p6UdeD+ZINBKJN4FhEos8uaKeASOAabq5RCpQdg= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743/go.mod h1:q1YCutTSMq/yGYvDPHReT37bPfDLHltnwJutzR9kOY0= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044 h1:i4oMhv1kx/MAUxRN4EM5tag5fI1uagrwQwINgKrzUt4=
github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044/go.mod h1:JD7fmV1BYADDcy4EYQnqFH/rUzXsh0Je0jXarCjZqSk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
@ -465,6 +465,8 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM=
github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
@ -473,6 +475,8 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs=
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
@ -558,6 +562,8 @@ github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRD
github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8=
github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
@ -724,8 +730,8 @@ github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniV
github.com/libp2p/go-libp2p v0.8.2/go.mod h1:NQDA/F/qArMHGe0J7sDScaKjW8Jh4y/ozQqBbYJ+BnA= github.com/libp2p/go-libp2p v0.8.2/go.mod h1:NQDA/F/qArMHGe0J7sDScaKjW8Jh4y/ozQqBbYJ+BnA=
github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM=
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
github.com/libp2p/go-libp2p v0.9.4 h1:yighwjFvsF/qQaGtHPZfxcF+ph4ydCNnsKvg712lYRo= github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0=
github.com/libp2p/go-libp2p v0.9.4/go.mod h1:NzQcC2o19xgwGqCmjx7DN+4h2F13qPCZ9UJmweYzsnU= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
@ -751,6 +757,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3
github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA= github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA=
github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM=
github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4=
github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w=
@ -777,6 +785,8 @@ github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqe
github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.5.7 h1:QK3xRwFxqd0Xd9bSZL+8yZ8ncZZbl6Zngd/+Y+A6sgQ= github.com/libp2p/go-libp2p-core v0.5.7 h1:QK3xRwFxqd0Xd9bSZL+8yZ8ncZZbl6Zngd/+Y+A6sgQ=
github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak=
github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
@ -836,17 +846,17 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj
github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw=
github.com/libp2p/go-libp2p-peerstore v0.2.4 h1:jU9S4jYN30kdzTpDAR7SlHUD+meDUjTODh4waLWF1ws= github.com/libp2p/go-libp2p-peerstore v0.2.4 h1:jU9S4jYN30kdzTpDAR7SlHUD+meDUjTODh4waLWF1ws=
github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U=
github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s=
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.1/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.3.2 h1:k3cJm5JW5mjaWZkobS50sJLJWaB2mBi0HW4eRlE8mSo= github.com/libp2p/go-libp2p-pubsub v0.3.2 h1:k3cJm5JW5mjaWZkobS50sJLJWaB2mBi0HW4eRlE8mSo=
github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.3.7/go.mod h1:Kr4aDtnfHHNeENn5J+sZIVc+t8HpQn9W6BOxhVGHbgI=
github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE= github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
@ -872,8 +882,8 @@ github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+
github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU=
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y=
github.com/libp2p/go-libp2p-swarm v0.2.6 h1:UhMXIa+yCOALQyceENEIStMlbTCzOM6aWo6vw8QW17Q= github.com/libp2p/go-libp2p-swarm v0.2.7 h1:4lV/sf7f0NuVqunOpt1I11+Z54+xp+m0eeAvxj/LyRc=
github.com/libp2p/go-libp2p-swarm v0.2.6/go.mod h1:F9hrkZjO7dDbcEiYii/fAB1QdpLuU6h1pa4P5VNsEgc= github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@ -972,7 +982,6 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/lucas-clemente/quic-go v0.15.7/go.mod h1:Myi1OyS0FOjL3not4BxT7KN29bRkcMUV5JVVFLKtDp8=
github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI= github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI=
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1043,6 +1052,8 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
@ -1072,6 +1083,8 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA= github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA=
github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM=
@ -1291,6 +1304,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@ -1778,6 +1794,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=

View File

@ -215,6 +215,9 @@ type MiningBase struct {
} }
func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) { func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) {
m.lk.Lock()
defer m.lk.Unlock()
bts, err := m.api.ChainHead(ctx) bts, err := m.api.ChainHead(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
@ -252,6 +255,12 @@ func (m *Miner) hasPower(ctx context.Context, addr address.Address, ts *types.Ti
return mpower.MinerPower.QualityAdjPower.GreaterThanEqual(power.ConsensusMinerMinPower), nil return mpower.MinerPower.QualityAdjPower.GreaterThanEqual(power.ConsensusMinerMinPower), nil
} }
// mineOne mines a single block, and does so synchronously, if and only if we
// have won the current round.
//
// {hint/landmark}: This method coordinates all the steps involved in mining a
// block, including the condition of whether mine or not at all depending on
// whether we win the round or not.
func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, error) { func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, error) {
log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids())) log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()))
start := time.Now() start := time.Now()

View File

@ -218,6 +218,7 @@ func Online() Option {
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
Override(new(dtypes.DrandConfig), modules.BuiltinDrandConfig),
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
@ -312,8 +313,12 @@ func Online() Option {
Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver),
Override(new(*miner.Miner), modules.SetupBlockProducer), Override(new(*miner.Miner), modules.SetupBlockProducer),
Override(new(dtypes.AcceptingRetrievalDealsConfigFunc), modules.NewAcceptingRetrievalDealsConfigFunc),
Override(new(dtypes.SetAcceptingRetrievalDealsConfigFunc), modules.NewSetAcceptingRetrievalDealsConfigFunc),
Override(new(dtypes.AcceptingStorageDealsConfigFunc), modules.NewAcceptingStorageDealsConfigFunc), Override(new(dtypes.AcceptingStorageDealsConfigFunc), modules.NewAcceptingStorageDealsConfigFunc),
Override(new(dtypes.SetAcceptingStorageDealsConfigFunc), modules.NewSetAcceptingStorageDealsConfigFunc), Override(new(dtypes.SetAcceptingStorageDealsConfigFunc), modules.NewSetAcceptingStorageDealsConfigFunc),
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
), ),
) )
} }

View File

@ -4,6 +4,8 @@ import (
"encoding" "encoding"
"time" "time"
"github.com/ipfs/go-cid"
sectorstorage "github.com/filecoin-project/sector-storage" sectorstorage "github.com/filecoin-project/sector-storage"
) )
@ -33,6 +35,8 @@ type StorageMiner struct {
type DealmakingConfig struct { type DealmakingConfig struct {
AcceptingStorageDeals bool AcceptingStorageDeals bool
AcceptingRetrievalDeals bool
PieceCidBlocklist []cid.Cid
} }
// API contains configs for API endpoint // API contains configs for API endpoint
@ -121,6 +125,8 @@ func DefaultStorageMiner() *StorageMiner {
Dealmaking: DealmakingConfig{ Dealmaking: DealmakingConfig{
AcceptingStorageDeals: true, AcceptingStorageDeals: true,
AcceptingRetrievalDeals: true,
PieceCidBlocklist: []cid.Cid{},
}, },
} }
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"

View File

@ -3,6 +3,7 @@ package client
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"github.com/filecoin-project/go-fil-markets/pieceio" "github.com/filecoin-project/go-fil-markets/pieceio"
basicnode "github.com/ipld/go-ipld-prime/node/basic" basicnode "github.com/ipld/go-ipld-prime/node/basic"
@ -30,7 +31,7 @@ import (
"go.uber.org/fx" "go.uber.org/fx"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket" rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
@ -58,8 +59,8 @@ type API struct {
paych.PaychAPI paych.PaychAPI
SMDealClient storagemarket.StorageClient SMDealClient storagemarket.StorageClient
RetDiscovery retrievalmarket.PeerResolver RetDiscovery rm.PeerResolver
Retrieval retrievalmarket.RetrievalClient Retrieval rm.RetrievalClient
Chain *store.ChainStore Chain *store.ChainStore
LocalDAG dtypes.ClientDAG LocalDAG dtypes.ClientDAG
@ -201,23 +202,49 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffe
out := make([]api.QueryOffer, len(peers)) out := make([]api.QueryOffer, len(peers))
for k, p := range peers { for k, p := range peers {
queryResponse, err := a.Retrieval.Query(ctx, p, root, retrievalmarket.QueryParams{}) out[k] = a.makeRetrievalQuery(ctx, p, root, rm.QueryParams{})
}
return out, nil
}
func (a *API) ClientMinerQueryOffer(ctx context.Context, payload cid.Cid, miner address.Address) (api.QueryOffer, error) {
mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
if err != nil { if err != nil {
out[k] = api.QueryOffer{Err: err.Error(), Miner: p.Address, MinerPeerID: p.ID} return api.QueryOffer{}, err
} else { }
out[k] = api.QueryOffer{ rp := rm.RetrievalPeer{
Root: root, Address: miner,
ID: mi.PeerId,
}
return a.makeRetrievalQuery(ctx, rp, payload, rm.QueryParams{}), nil
}
func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, payload cid.Cid, qp rm.QueryParams) api.QueryOffer {
queryResponse, err := a.Retrieval.Query(ctx, rp, payload, qp)
if err != nil {
return api.QueryOffer{Err: err.Error(), Miner: rp.Address, MinerPeerID: rp.ID}
}
var errStr string
switch queryResponse.Status {
case rm.QueryResponseAvailable:
errStr = ""
case rm.QueryResponseUnavailable:
errStr = fmt.Sprintf("retrieval query offer was unavailable: %s", queryResponse.Message)
case rm.QueryResponseError:
errStr = fmt.Sprintf("retrieval query offer errored: %s", queryResponse.Message)
}
return api.QueryOffer{
Root: payload,
Size: queryResponse.Size, Size: queryResponse.Size,
MinPrice: queryResponse.PieceRetrievalPrice(), MinPrice: queryResponse.PieceRetrievalPrice(),
PaymentInterval: queryResponse.MaxPaymentInterval, PaymentInterval: queryResponse.MaxPaymentInterval,
PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease, PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
Miner: queryResponse.PaymentAddress, // TODO: check Miner: queryResponse.PaymentAddress, // TODO: check
MinerPeerID: p.ID, MinerPeerID: rp.ID,
Err: errStr,
} }
}
}
return out, nil
} }
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (cid.Cid, error) { func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (cid.Cid, error) {
@ -318,13 +345,35 @@ func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
retrievalResult := make(chan error, 1) retrievalResult := make(chan error, 1)
unsubscribe := a.Retrieval.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) {
if state.PayloadCID.Equals(order.Root) { if state.PayloadCID.Equals(order.Root) {
switch state.Status { switch state.Status {
case retrievalmarket.DealStatusFailed, retrievalmarket.DealStatusErrored: case rm.DealStatusCompleted:
retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
case retrievalmarket.DealStatusCompleted:
retrievalResult <- nil retrievalResult <- nil
case rm.DealStatusRejected:
retrievalResult <- xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message)
case
rm.DealStatusDealNotFound,
rm.DealStatusErrored,
rm.DealStatusFailed:
retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
case
rm.DealStatusAccepted,
rm.DealStatusAwaitingAcceptance,
rm.DealStatusBlocksComplete,
rm.DealStatusFinalizing,
rm.DealStatusFundsNeeded,
rm.DealStatusFundsNeededLastPayment,
rm.DealStatusNew,
rm.DealStatusOngoing,
rm.DealStatusPaymentChannelAddingFunds,
rm.DealStatusPaymentChannelAllocatingLane,
rm.DealStatusPaymentChannelCreating,
rm.DealStatusPaymentChannelReady,
rm.DealStatusVerified:
return
default:
retrievalResult <- xerrors.Errorf("Unhandled Retrieval Status: %+v", state.Status)
} }
} }
}) })
@ -334,7 +383,7 @@ func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
_, err := a.Retrieval.Retrieve( _, err := a.Retrieval.Retrieve(
ctx, ctx,
order.Root, order.Root,
retrievalmarket.NewParamsV0(ppb, order.PaymentInterval, order.PaymentIntervalIncrease), rm.NewParamsV0(ppb, order.PaymentInterval, order.PaymentIntervalIncrease),
order.Total, order.Total,
order.MinerPeerID, order.MinerPeerID,
order.Client, order.Client,

View File

@ -139,4 +139,8 @@ func (a *CommonAPI) Shutdown(ctx context.Context) error {
return nil return nil
} }
func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil // relies on jsonrpc closing
}
var _ api.Common = &CommonAPI{} var _ api.Common = &CommonAPI{}

View File

@ -425,7 +425,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet
if err != nil { if err != nil {
return nil, err return nil, err
} }
locked, err := hamt.LoadNode(ctx, cst, state.EscrowTable, hamt.UseTreeBitWidth(5)) locked, err := hamt.LoadNode(ctx, cst, state.LockedTable, hamt.UseTreeBitWidth(5))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -489,14 +489,12 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m
var s market.DealState var s market.DealState
if err := sa.Get(ctx, i, &s); err != nil { if err := sa.Get(ctx, i, &s); err != nil {
if err != nil {
if _, ok := err.(*amt.ErrNotFound); !ok { if _, ok := err.(*amt.ErrNotFound); !ok {
return xerrors.Errorf("failed to get state for deal in proposals array: %w", err) return xerrors.Errorf("failed to get state for deal in proposals array: %w", err)
} }
s.SectorStartEpoch = -1 s.SectorStartEpoch = -1
} }
}
out[strconv.FormatInt(int64(i), 10)] = api.MarketDeal{ out[strconv.FormatInt(int64(i), 10)] = api.MarketDeal{
Proposal: d, Proposal: d,
State: s, State: s,

View File

@ -44,6 +44,9 @@ type StorageMinerAPI struct {
*stores.Index *stores.Index
SetAcceptingStorageDealsConfigFunc dtypes.SetAcceptingStorageDealsConfigFunc SetAcceptingStorageDealsConfigFunc dtypes.SetAcceptingStorageDealsConfigFunc
SetAcceptingRetrievalDealsConfigFunc dtypes.SetAcceptingRetrievalDealsConfigFunc
StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc
SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc
} }
func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) { func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
@ -172,6 +175,10 @@ func (sm *StorageMinerAPI) SectorsUpdate(ctx context.Context, id abi.SectorNumbe
return sm.Miner.ForceSectorState(ctx, id, sealing.SectorState(state)) return sm.Miner.ForceSectorState(ctx, id, sealing.SectorState(state))
} }
func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber) error {
return sm.Miner.RemoveSector(ctx, id)
}
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := connectRemoteWorker(ctx, sm, url) w, err := connectRemoteWorker(ctx, sm, url)
if err != nil { if err != nil {
@ -201,8 +208,17 @@ func (sm *StorageMinerAPI) MarketListIncompleteDeals(ctx context.Context) ([]sto
return sm.StorageProvider.ListLocalDeals() return sm.StorageProvider.ListLocalDeals()
} }
func (sm *StorageMinerAPI) MarketSetPrice(ctx context.Context, p types.BigInt) error { func (sm *StorageMinerAPI) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
return sm.StorageProvider.SetAsk(p, 60*60*24*100) // lasts for 100 days? options := []storagemarket.StorageAskOption{
storagemarket.MinPieceSize(minPieceSize),
storagemarket.MaxPieceSize(maxPieceSize),
}
return sm.StorageProvider.SetAsk(price, duration, options...)
}
func (sm *StorageMinerAPI) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
return sm.StorageProvider.GetAsk(), nil
} }
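A small usage sketch of the new ask bounds, grounded in the calls shown above; the concrete sizes are arbitrary illustrations. It builds the same StorageAskOption slice that MarketSetAsk forwards to SetAsk.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

func main() {
	// Arbitrary illustrative bounds: 256-byte minimum, 32 GiB maximum padded piece size.
	minPieceSize := abi.PaddedPieceSize(256)
	maxPieceSize := abi.PaddedPieceSize(32 << 30)

	// MarketSetAsk builds the same slice and passes it to StorageProvider.SetAsk.
	options := []storagemarket.StorageAskOption{
		storagemarket.MinPieceSize(minPieceSize),
		storagemarket.MaxPieceSize(maxPieceSize),
	}
	fmt.Println("configured ask options:", len(options))
}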
func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) { func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) {
@ -213,6 +229,10 @@ func (sm *StorageMinerAPI) DealsSetAcceptingStorageDeals(ctx context.Context, b
return sm.SetAcceptingStorageDealsConfigFunc(b) return sm.SetAcceptingStorageDealsConfigFunc(b)
} }
func (sm *StorageMinerAPI) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
return sm.SetAcceptingRetrievalDealsConfigFunc(b)
}
func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error { func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error {
fi, err := os.Open(fname) fi, err := os.Open(fname)
if err != nil { if err != nil {
@ -223,6 +243,14 @@ func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fn
return sm.StorageProvider.ImportDataForDeal(ctx, deal, fi) return sm.StorageProvider.ImportDataForDeal(ctx, deal, fi)
} }
func (sm *StorageMinerAPI) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
return sm.StorageDealPieceCidBlocklistConfigFunc()
}
func (sm *StorageMinerAPI) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
return sm.SetStorageDealPieceCidBlocklistConfigFunc(cids)
}
func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) error { func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) error {
if sm.StorageMgr == nil { if sm.StorageMgr == nil {
return xerrors.Errorf("no storage manager") return xerrors.Errorf("no storage manager")

View File

@ -0,0 +1,6 @@
package dtypes
type DrandConfig struct {
Servers []string
ChainInfoJSON string
}

View File

@ -1,6 +1,8 @@
package dtypes package dtypes
import ( import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
) )
@ -8,10 +10,27 @@ import (
type MinerAddress address.Address type MinerAddress address.Address
type MinerID abi.ActorID type MinerID abi.ActorID
// AcceptingStorageDealsFunc is a function which reads from miner config to // AcceptingStorageDealsConfigFunc is a function which reads from miner config
// determine if the user has disabled storage deals (or not). // to determine if the user has disabled storage deals (or not).
type AcceptingStorageDealsConfigFunc func() (bool, error) type AcceptingStorageDealsConfigFunc func() (bool, error)
// SetAcceptingStorageDealsFunc is a function which is used to disable or enable // SetAcceptingStorageDealsConfigFunc is a function which is used to disable or
// storage deal acceptance. // enable storage deal acceptance.
type SetAcceptingStorageDealsConfigFunc func(bool) error type SetAcceptingStorageDealsConfigFunc func(bool) error
// AcceptingRetrievalDealsConfigFunc is a function which reads from miner config
// to determine if the user has disabled retrieval acceptance (or not).
type AcceptingRetrievalDealsConfigFunc func() (bool, error)
// SetAcceptingRetrievalDealsConfigFunc is a function which is used to disable
// or enable retrieval deal acceptance.
type SetAcceptingRetrievalDealsConfigFunc func(bool) error
// StorageDealPieceCidBlocklistConfigFunc is a function which reads from miner config
// to obtain a list of CIDs for which the storage miner will not accept storage
// proposals.
type StorageDealPieceCidBlocklistConfigFunc func() ([]cid.Cid, error)
// SetStorageDealPieceCidBlocklistConfigFunc is a function which is used to set a
// list of CIDs for which the storage miner will reject deal proposals.
type SetStorageDealPieceCidBlocklistConfigFunc func([]cid.Cid) error
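As a toy sketch of the get/set contract these function types describe (the real implementations added later in this commit read and mutate the repo config; the in-memory store below is invented purely for illustration):

package main

import (
	"fmt"
	"sync"

	"github.com/ipfs/go-cid"
)

// fakeConfig is an invented in-memory stand-in for the miner config.
type fakeConfig struct {
	mu        sync.Mutex
	blocklist []cid.Cid
}

// get mirrors StorageDealPieceCidBlocklistConfigFunc: return the current blocklist.
func (c *fakeConfig) get() ([]cid.Cid, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return append([]cid.Cid(nil), c.blocklist...), nil
}

// set mirrors SetStorageDealPieceCidBlocklistConfigFunc: replace the blocklist.
func (c *fakeConfig) set(blocklist []cid.Cid) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocklist = append([]cid.Cid(nil), blocklist...)
	return nil
}

func main() {
	cfg := &fakeConfig{}
	_ = cfg.set([]cid.Cid{}) // start with an empty blocklist
	out, _ := cfg.get()
	fmt.Println("blocklist entries:", len(out))
}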

View File

@ -44,13 +44,14 @@ type GossipIn struct {
Db dtypes.DrandBootstrap Db dtypes.DrandBootstrap
Cfg *config.Pubsub Cfg *config.Pubsub
Sk *dtypes.ScoreKeeper Sk *dtypes.ScoreKeeper
Dr dtypes.DrandConfig
} }
func getDrandTopic() (string, error) { func getDrandTopic(chainInfoJSON string) (string, error) {
var drandInfo = struct { var drandInfo = struct {
Hash string `json:"hash"` Hash string `json:"hash"`
}{} }{}
err := json.Unmarshal([]byte(build.DrandChain), &drandInfo) err := json.Unmarshal([]byte(chainInfoJSON), &drandInfo)
if err != nil { if err != nil {
return "", xerrors.Errorf("could not unmarshal drand chain info: %w", err) return "", xerrors.Errorf("could not unmarshal drand chain info: %w", err)
} }
@ -68,7 +69,7 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
} }
isBootstrapNode := in.Cfg.Bootstrapper isBootstrapNode := in.Cfg.Bootstrapper
drandTopic, err := getDrandTopic() drandTopic, err := getDrandTopic(in.Dr.ChainInfoJSON)
if err != nil { if err != nil {
return nil, err return nil, err
} }
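A standalone sketch of the parsing step getDrandTopic now performs on the injected chain-info JSON; the sample document and hash value below are placeholders, and the actual topic string construction is not shown in this hunk.

package main

import (
	"encoding/json"
	"fmt"
)

// parseDrandChainHash extracts the "hash" field from a drand chain-info JSON
// document, mirroring the unmarshal step inside getDrandTopic.
func parseDrandChainHash(chainInfoJSON string) (string, error) {
	var drandInfo = struct {
		Hash string `json:"hash"`
	}{}
	if err := json.Unmarshal([]byte(chainInfoJSON), &drandInfo); err != nil {
		return "", fmt.Errorf("could not unmarshal drand chain info: %w", err)
	}
	return drandInfo.Hash, nil
}

func main() {
	// Placeholder chain info, not a real drand network description.
	sample := `{"hash":"deadbeef"}`
	hash, err := parseDrandChainHash(sample)
	if err != nil {
		panic(err)
	}
	fmt.Println("drand chain hash:", hash)
}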

View File

@ -110,6 +110,11 @@ type RandomBeaconParams struct {
PubSub *pubsub.PubSub `optional:"true"` PubSub *pubsub.PubSub `optional:"true"`
Cs *store.ChainStore Cs *store.ChainStore
DrandConfig dtypes.DrandConfig
}
func BuiltinDrandConfig() dtypes.DrandConfig {
return build.DrandConfig
} }
func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.RandomBeacon, error) { func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.RandomBeacon, error) {
@ -119,5 +124,5 @@ func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Random
} }
//return beacon.NewMockBeacon(build.BlockDelay * time.Second) //return beacon.NewMockBeacon(build.BlockDelay * time.Second)
return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelay, p.PubSub) return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelay, p.PubSub, p.DrandConfig)
} }

View File

@ -3,11 +3,13 @@ package modules
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"net/http" "net/http"
"github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap"
"github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-bitswap/network"
"github.com/ipfs/go-blockservice" "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace" "github.com/ipfs/go-datastore/namespace"
graphsync "github.com/ipfs/go-graphsync/impl" graphsync "github.com/ipfs/go-graphsync/impl"
@ -73,6 +75,12 @@ func GetParams(sbc *ffiwrapper.Config) error {
return err return err
} }
// If built-in assets are disabled, we expect the user to have placed the right
// parameters in the right location on the filesystem (/var/tmp/filecoin-proof-parameters).
if build.DisableBuiltinAssets {
return nil
}
if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil { if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err) return xerrors.Errorf("fetching proof parameters: %w", err)
} }
@ -307,7 +315,7 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat
return storedAsk, nil return storedAsk, nil
} }
func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, isAcceptingFunc dtypes.AcceptingStorageDealsConfigFunc) (storagemarket.StorageProvider, error) { func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, isAcceptingFunc dtypes.AcceptingStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc) (storagemarket.StorageProvider, error) {
net := smnet.NewFromLibp2pHost(h) net := smnet.NewFromLibp2pHost(h)
store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path())) store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path()))
if err != nil { if err != nil {
@ -325,6 +333,18 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
return false, "miner is not accepting storage deals", nil return false, "miner is not accepting storage deals", nil
} }
blocklist, err := blocklistFunc()
if err != nil {
return false, "miner error", err
}
for idx := range blocklist {
if deal.Proposal.PieceCID.Equals(blocklist[idx]) {
log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String())
return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil
}
}
return true, "", nil return true, "", nil
}) })
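The blocklist check above is a linear scan over the configured CIDs on every proposal, which is fine for short lists. As a hedged aside, a long blocklist could be precomputed into a set keyed by cid.Cid (go-cid CIDs are comparable), roughly as follows.

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

// buildBlockset turns a blocklist slice into a constant-time lookup set.
func buildBlockset(blocklist []cid.Cid) map[cid.Cid]struct{} {
	set := make(map[cid.Cid]struct{}, len(blocklist))
	for _, c := range blocklist {
		set[c] = struct{}{}
	}
	return set
}

func main() {
	set := buildBlockset(nil) // empty blocklist
	_, blocked := set[cid.Undef]
	fmt.Println("cid.Undef blocked:", blocked) // false
}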
@ -337,14 +357,31 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
} }
// RetrievalProvider creates a new retrieval provider attached to the provider blockstore // RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) { func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore, isAcceptingFunc dtypes.AcceptingRetrievalDealsConfigFunc) (retrievalmarket.RetrievalProvider, error) {
adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full) adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full)
address, err := minerAddrFromDS(ds)
maddr, err := minerAddrFromDS(ds)
if err != nil { if err != nil {
return nil, err return nil, err
} }
network := rmnet.NewFromLibp2pHost(h)
return retrievalimpl.NewProvider(address, adapter, network, pieceStore, ibs, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider"))) netwk := rmnet.NewFromLibp2pHost(h)
opt := retrievalimpl.DealDeciderOpt(func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
b, err := isAcceptingFunc()
if err != nil {
return false, "miner error", err
}
if !b {
log.Warn("retrieval deal acceptance disabled; rejecting retrieval deal proposal from client")
return false, "miner is not accepting retrieval deals", nil
}
return true, "", nil
})
return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, ibs, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt)
} }
func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) { func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) {
@ -379,24 +416,77 @@ func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageA
return sectorstorage.StorageAuth(headers), nil return sectorstorage.StorageAuth(headers), nil
} }
func NewAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingRetrievalDealsConfigFunc, error) {
return func() (out bool, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
out = cfg.Dealmaking.AcceptingRetrievalDeals
})
return
}, nil
}
func NewSetAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingRetrievalDealsConfigFunc, error) {
return func(b bool) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.AcceptingRetrievalDeals = b
})
return
}, nil
}
func NewAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingStorageDealsConfigFunc, error) { func NewAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingStorageDealsConfigFunc, error) {
return func() (bool, error) { return func() (out bool, err error) {
raw, err := r.Config() err = readCfg(r, func(cfg *config.StorageMiner) {
if err != nil { out = cfg.Dealmaking.AcceptingStorageDeals
return false, err })
} return
cfg, ok := raw.(*config.StorageMiner)
if !ok {
return false, xerrors.New("expected address of config.StorageMiner")
}
return cfg.Dealmaking.AcceptingStorageDeals, nil
}, nil }, nil
} }
func NewSetAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingStorageDealsConfigFunc, error) { func NewSetAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingStorageDealsConfigFunc, error) {
return func(b bool) error { return func(b bool) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.AcceptingStorageDeals = b
})
return
}, nil
}
func NewStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.StorageDealPieceCidBlocklistConfigFunc, error) {
return func() (out []cid.Cid, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
out = cfg.Dealmaking.PieceCidBlocklist
})
return
}, nil
}
func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.SetStorageDealPieceCidBlocklistConfigFunc, error) {
return func(blocklist []cid.Cid) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.PieceCidBlocklist = blocklist
})
return
}, nil
}
func readCfg(r repo.LockedRepo, accessor func(*config.StorageMiner)) error {
raw, err := r.Config()
if err != nil {
return err
}
cfg, ok := raw.(*config.StorageMiner)
if !ok {
return xerrors.New("expected address of config.StorageMiner")
}
accessor(cfg)
return nil
}
func mutateCfg(r repo.LockedRepo, mutator func(*config.StorageMiner)) error {
var typeErr error var typeErr error
setConfigErr := r.SetConfig(func(raw interface{}) { setConfigErr := r.SetConfig(func(raw interface{}) {
@ -406,9 +496,8 @@ func NewSetAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAccepti
return return
} }
cfg.Dealmaking.AcceptingStorageDeals = b mutator(cfg)
}) })
return multierr.Combine(typeErr, setConfigErr) return multierr.Combine(typeErr, setConfigErr)
}, nil
} }

View File

@ -0,0 +1,15 @@
[Unit]
Description=Chainwatch
After=lotus-daemon.service
Requires=lotus-daemon.service
[Service]
Environment=GOLOG_FILE="/var/log/lotus/chainwatch.log"
Environment=GOLOG_LOG_FMT="json"
Environment=LOTUS_DB=""
Environment=LOTUS_PATH="%h/.lotus"
EnvironmentFile=-/etc/lotus/chainwatch.env
ExecStart=/usr/local/bin/chainwatch run
[Install]
WantedBy=multi-user.target

View File

@ -1,14 +1,14 @@
[Unit] [Unit]
Description=Lotus Daemon Description=Lotus Daemon
After=network-online.target After=network-online.target
Wants=network-online.target Requires=network-online.target
[Service] [Service]
Environment=GOLOG_FILE="/var/log/lotus-daemon" Environment=GOLOG_FILE="/var/log/lotus/daemon.log"
Environment=GOLOG_LOG_FMT="json" Environment=GOLOG_LOG_FMT="json"
ExecStart=/usr/local/bin/lotus daemon ExecStart=/usr/local/bin/lotus daemon
Restart=always Restart=always
RestartSec=30 RestartSec=10
MemoryAccounting=true MemoryAccounting=true
MemoryHigh=8G MemoryHigh=8G

View File

@ -2,10 +2,11 @@
Description=Lotus Storage Miner Description=Lotus Storage Miner
After=network.target After=network.target
After=lotus-daemon.service After=lotus-daemon.service
Requires=lotus-daemon.service
[Service] [Service]
ExecStart=/usr/local/bin/lotus-storage-miner run ExecStart=/usr/local/bin/lotus-storage-miner run
Environment=GOLOG_FILE="/var/log/lotus-miner" Environment=GOLOG_FILE="/var/log/lotus/miner.log"
Environment=GOLOG_LOG_FMT="json" Environment=GOLOG_LOG_FMT="json"
[Install] [Install]

View File

@ -39,3 +39,7 @@ func (m *Miner) PledgeSector() error {
func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error { func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error {
return m.sealing.ForceSectorState(ctx, id, state) return m.sealing.ForceSectorState(ctx, id, state)
} }
func (m *Miner) RemoveSector(ctx context.Context, id abi.SectorNumber) error {
return m.sealing.Remove(ctx, id)
}

View File

@ -99,10 +99,6 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check *abi.BitFi
log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors)) log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors))
if len(sectors) == 0 { // nothing to recover
return nil, nil
}
sbf := bitfield.New() sbf := bitfield.New()
for s := range sectors { for s := range sectors {
(&sbf).Set(uint64(s.Number)) (&sbf).Set(uint64(s.Number))
@ -387,25 +383,22 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
return nil, xerrors.Errorf("get need prove sectors: %w", err) return nil, xerrors.Errorf("get need prove sectors: %w", err)
} }
var skipped *abi.BitField
{
good, err := s.checkSectors(ctx, nps) good, err := s.checkSectors(ctx, nps)
if err != nil { if err != nil {
return nil, xerrors.Errorf("checking sectors to skip: %w", err) return nil, xerrors.Errorf("checking sectors to skip: %w", err)
} }
skipped, err = bitfield.SubtractBitField(nps, good) skipped, err := bitfield.SubtractBitField(nps, good)
if err != nil { if err != nil {
return nil, xerrors.Errorf("nps - good: %w", err) return nil, xerrors.Errorf("nps - good: %w", err)
} }
}
skipCount, err := skipped.Count() skipCount, err := skipped.Count()
if err != nil { if err != nil {
return nil, xerrors.Errorf("getting skipped sector count: %w", err) return nil, xerrors.Errorf("getting skipped sector count: %w", err)
} }
ssi, err := s.sortedSectorInfo(ctx, nps, ts) ssi, err := s.sortedSectorInfo(ctx, good, ts)
if err != nil { if err != nil {
return nil, xerrors.Errorf("getting sorted sector info: %w", err) return nil, xerrors.Errorf("getting sorted sector info: %w", err)
} }