Merge pull request #2 from RobQuistNL/patch-2

Update makefile
Rob Quist 2020-06-26 02:10:53 +02:00 committed by GitHub
commit 3d3e384d5c
73 changed files with 1991 additions and 544 deletions


@ -314,7 +314,7 @@ workflows:
ci:
jobs:
- lint-changes:
args: "--new-from-rev origin/next"
args: "--new-from-rev origin/master"
- mod-tidy-check
- gofmt
- test:


@ -6,7 +6,7 @@ all: build
unexport GOFLAGS
GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
ifeq ($(shell expr $(GOVERSION) \< 13), 1)
ifeq ($(shell expr $(GOVERSION) \< 14), 1)
$(warning Your Golang version is go 1.$(GOVERSION))
$(error Update Golang to version $(shell grep '^go' go.mod))
endif
@ -105,15 +105,17 @@ install:
install-services: install
mkdir -p /usr/local/lib/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-daemon.service /usr/local/lib/systemd/system/lotus-daemon.service
install -C -m 0644 ./scripts/lotus-miner.service /usr/local/lib/systemd/system/lotus-miner.service
systemctl daemon-reload
@echo
@echo "lotus and lotus-miner services installed. Don't forget to 'systemctl enable lotus|lotus-miner' for it to be enabled on startup."
@echo "lotus-daemon and lotus-miner services installed. Don't forget to 'systemctl enable lotus-daemon|lotus-miner' for it to be enabled on startup."
clean-services:
rm -f /usr/local/lib/systemd/system/lotus-daemon.service
rm -f /usr/local/lib/systemd/system/lotus-miner.service
rm -f /usr/local/lib/systemd/system/chainwatch.service
systemctl daemon-reload
# TOOLS
@ -160,6 +162,13 @@ chainwatch:
.PHONY: chainwatch
BINS+=chainwatch
install-chainwatch-service: chainwatch
install -C ./chainwatch /usr/local/bin/chainwatch
install -C -m 0644 ./scripts/chainwatch.service /usr/local/lib/systemd/system/chainwatch.service
systemctl daemon-reload
@echo
@echo "chainwatch installed. Don't forget to 'systemctl enable chainwatch' for it to be enabled on startup."
bench:
rm -f bench
go build -o bench ./cmd/lotus-bench


@ -8,6 +8,10 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
## Reporting a Vulnerability
Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
## Development
All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).

SECURITY.md Normal file

@ -0,0 +1,29 @@
# Security Policy
## Reporting a Vulnerability
For *critical* bugs, please send an email to security@filecoin.org.
The bug reporting process differs between critical bugs that may crash the network and bugs that are unlikely to cause problems even if malicious parties know about them. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
Here are some examples of bugs we would consider 'critical':
* If you can spend from a `multisig` wallet you do not control the keys for.
* If you can cause a miner to be slashed without them actually misbehaving.
* If you can maintain power without submitting windowed posts regularly.
* If you can craft a message that causes lotus nodes to panic.
* If you can cause your miner to win significantly more blocks than it should.
* If you can craft a message that causes a persistent fork in the network.
* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
## Supported Versions
* TODO: This should be defined and set up by Mainnet launch.
| Version | Supported |
| ------- | ------------------ |
| Testnet | :white_check_mark: |


@ -13,11 +13,13 @@ import (
)
type Common interface {
// Auth
// MethodGroup: Auth
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
// network
// MethodGroup: Net
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
@ -27,6 +29,8 @@ type Common interface {
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
NetPubsubScores(context.Context) ([]PubsubScore, error)
// MethodGroup: Common
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error)
@ -38,6 +42,8 @@ type Common interface {
// trigger graceful shutdown
Shutdown(context.Context) error
Closing(context.Context) (<-chan struct{}, error)
}
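Taken together, Shutdown and Closing let an embedding program trigger and observe a graceful stop. A minimal usage sketch, assuming an api.Common handle has already been obtained elsewhere (waitForShutdown is a hypothetical helper, not part of the API):

```go
package example

import (
	"context"
	"log"

	"github.com/filecoin-project/lotus/api"
)

// waitForShutdown blocks until the node signals that it is closing, or the
// caller's context is cancelled.
func waitForShutdown(ctx context.Context, node api.Common) error {
	closing, err := node.Closing(ctx)
	if err != nil {
		return err
	}
	select {
	case <-closing:
		log.Println("node is shutting down")
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```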
// Version provides various build-time information


@ -35,26 +35,71 @@ type FullNode interface {
// ChainNotify returns channel with chain head updates
// First message is guaranteed to be of len == 1, and type == 'current'
ChainNotify(context.Context) (<-chan []*HeadChange, error)
// ChainHead returns the current head of the chain
ChainHead(context.Context) (*types.TipSet, error)
// ChainGetRandomness is used to sample the chain for randomness
ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
// ChainGetBlock returns the block specified by the given CID
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
// ChainGetBlockMessages returns messages stored in the specified block
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
// ChainGetParentReceipts returns receipts for messages in parent tipset of
// the specified block
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
// ChainGetParentMessages returns messages stored in the parent tipset of the
// specified block
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at higher epoch
// will be returned
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
// ChainHasObj checks if a given CID exists in the chain blockstore
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainStatObj(context.Context, cid.Cid, cid.Cid) (ObjStat, error)
// ChainSetHead forcefully sets current chain head. Use with caution
ChainSetHead(context.Context, types.TipSetKey) error
// ChainGetGenesis returns the genesis tipset
ChainGetGenesis(context.Context) (*types.TipSet, error)
// ChainTipSetWeight computes weight for the specified tipset
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
//```
// to
// ^
// from tAA
// ^ ^
// tBA tAB
// ^---*--^
// ^
// tRR
//```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
// ChainExport returns a stream of bytes with CAR dump of chain data
ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error)
// MethodGroup: Sync
@ -63,23 +108,45 @@ type FullNode interface {
// SyncState returns the current status of the lotus sync system
SyncState(context.Context) (*SyncState, error)
// SyncSubmitBlock can be used to submit a newly created block to the
// network through this node
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution
SyncMarkBad(ctx context.Context, bcid cid.Cid) error
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
// MpoolPush pushes a signed message to mempool
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // get nonce, sign, push
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool
MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error)
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead
MpoolGetNonce(context.Context, address.Address) (uint64, error)
MpoolSub(context.Context) (<-chan MpoolUpdate, error)
MpoolEstimateGasPrice(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
// MpoolEstimateGasPrice estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs
MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
// MethodGroup: Miner
@ -118,6 +185,7 @@ type FullNode interface {
ClientListDeals(ctx context.Context) ([]DealInfo, error)
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error)
ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (QueryOffer, error)
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*CommPRet, error)
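The Mpool comments above recommend MpoolPushMessage over a manual MpoolGetNonce + MpoolPush sequence, since only the former assigns the nonce atomically. A minimal sketch of that call path, assuming an api.FullNode handle is already available; sendFunds is a hypothetical helper and the gas fields are left to the caller:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// sendFunds pushes a simple value transfer through the node's message pool.
// MpoolPushMessage assigns the nonce, signs and pushes in one step, so the
// racy MpoolGetNonce + MpoolPush sequence is not needed.
func sendFunds(ctx context.Context, node api.FullNode, from, to address.Address, amount types.BigInt) error {
	msg := &types.Message{
		From:  from,
		To:    to,
		Value: amount,
		// Nonce is left unset on purpose: the node fills it in.
		// Gas fields are omitted here; set them appropriately for a real message.
	}
	smsg, err := node.MpoolPushMessage(ctx, msg)
	if err != nil {
		return fmt.Errorf("pushing message: %w", err)
	}
	fmt.Println("pushed message:", smsg.Cid())
	return nil
}
```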


@ -36,6 +36,7 @@ type StorageMiner interface {
SectorsRefs(context.Context) (map[string][]SealedRef, error)
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
SectorRemove(context.Context, abi.SectorNumber) error
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
StorageLocal(ctx context.Context) (map[stores.ID]string, error)
@ -50,11 +51,15 @@ type StorageMiner interface {
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
MarketSetPrice(context.Context, types.BigInt) error
MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
DealsSetAcceptingStorageDeals(context.Context, bool) error
DealsSetAcceptingRetrievalDeals(context.Context, bool) error
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
StorageAddLocal(ctx context.Context, path string) error
}


@ -50,7 +50,8 @@ type CommonStruct struct {
LogList func(context.Context) ([]string, error) `perm:"write"`
LogSetLevel func(context.Context, string, string) error `perm:"write"`
Shutdown func(context.Context) error `perm:"admin"`
Shutdown func(context.Context) error `perm:"admin"`
Closing func(context.Context) (<-chan struct{}, error) `perm:"read"`
}
}
@ -108,17 +109,18 @@ type FullNodeStruct struct {
WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
WalletDelete func(context.Context, address.Address) error `perm:"write"`
ClientImport func(ctx context.Context, ref api.FileRef) (cid.Cid, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
ClientCalcCommP func(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) `perm:"read"`
ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
ClientImport func(ctx context.Context, ref api.FileRef) (cid.Cid, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
ClientMinerQueryOffer func(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) `perm:"read"`
ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
ClientCalcCommP func(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) `perm:"read"`
ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
@ -192,10 +194,11 @@ type StorageMinerStruct struct {
MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"`
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
MarketSetPrice func(context.Context, types.BigInt) error `perm:"admin"`
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
MarketSetAsk func(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
PledgeSector func(context.Context) error `perm:"write"`
@ -203,6 +206,7 @@ type StorageMinerStruct struct {
SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"`
SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"`
SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"`
SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`
WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm
WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"`
@ -220,9 +224,12 @@ type StorageMinerStruct struct {
StorageLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"`
StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"`
DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
DealsSetAcceptingStorageDeals func(context.Context, bool) error `perm:"admin"`
DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
DealsSetAcceptingStorageDeals func(context.Context, bool) error `perm:"admin"`
DealsSetAcceptingRetrievalDeals func(context.Context, bool) error `perm:"admin"`
DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"admin"`
DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"read"`
StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
}
@ -238,12 +245,14 @@ type WorkerStruct struct {
Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
FinalizeSector func(context.Context, abi.SectorID) error `perm:"admin"`
MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"`
ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"`
Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error `perm:"admin"`
@ -313,6 +322,10 @@ func (c *CommonStruct) Shutdown(ctx context.Context) error {
return c.Internal.Shutdown(ctx)
}
func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
return c.Internal.Closing(ctx)
}
// FullNodeStruct
func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) {
@ -331,6 +344,10 @@ func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]ap
return c.Internal.ClientFindData(ctx, root)
}
func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) {
return c.Internal.ClientMinerQueryOffer(ctx, root, miner)
}
func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
return c.Internal.ClientStartDeal(ctx, params)
}
@ -773,6 +790,10 @@ func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNum
return c.Internal.SectorsUpdate(ctx, id, state)
}
func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error {
return c.Internal.SectorRemove(ctx, number)
}
func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
return c.Internal.WorkerConnect(ctx, url)
}
@ -841,8 +862,12 @@ func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]s
return c.Internal.MarketListIncompleteDeals(ctx)
}
func (c *StorageMinerStruct) MarketSetPrice(ctx context.Context, p types.BigInt) error {
return c.Internal.MarketSetPrice(ctx, p)
func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
return c.Internal.MarketSetAsk(ctx, price, duration, minPieceSize, maxPieceSize)
}
func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
return c.Internal.MarketGetAsk(ctx)
}
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
@ -857,6 +882,18 @@ func (c *StorageMinerStruct) DealsSetAcceptingStorageDeals(ctx context.Context,
return c.Internal.DealsSetAcceptingStorageDeals(ctx, b)
}
func (c *StorageMinerStruct) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
return c.Internal.DealsSetAcceptingRetrievalDeals(ctx, b)
}
func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
return c.Internal.DealsPieceCidBlocklist(ctx)
}
func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
return c.Internal.DealsSetPieceCidBlocklist(ctx, cids)
}
func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
return c.Internal.StorageAddLocal(ctx, path)
}
@ -895,8 +932,16 @@ func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o
return w.Internal.SealCommit2(ctx, sector, c1o)
}
func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
return w.Internal.FinalizeSector(ctx, sector)
func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
}
func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
}
func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
return w.Internal.Remove(ctx, sector)
}
func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error {


@ -73,9 +73,13 @@ func init() {
addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
addExample(abi.ChainEpoch(10101))
addExample(crypto.SigTypeBLS)
addExample(int64(9))
addExample(12.3)
addExample(123)
addExample(uintptr(0))
addExample(abi.MethodNum(1))
addExample(exitcode.ExitCode(0))
addExample(crypto.DomainSeparationTag_ElectionProofProduction)
@ -94,17 +98,17 @@ func init() {
addExample(api.PCHInbound)
addExample(time.Minute)
addExample(&types.ExecutionTrace{
Msg: exampleValue(reflect.TypeOf(&types.Message{})).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{})).(*types.MessageReceipt),
Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
})
addExample(map[string]types.Actor{
"t01236": exampleValue(reflect.TypeOf(types.Actor{})).(types.Actor),
"t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{})).(api.MarketDeal),
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
})
addExample(map[string]api.MarketBalance{
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{})).(api.MarketBalance),
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
})
maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
@ -117,7 +121,7 @@ func init() {
}
func exampleValue(t reflect.Type) interface{} {
func exampleValue(t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t]
if ok {
return v
@ -126,25 +130,25 @@ func exampleValue(t reflect.Type) interface{} {
switch t.Kind() {
case reflect.Slice:
out := reflect.New(t).Elem()
reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem())))
reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
return out.Interface()
case reflect.Chan:
return exampleValue(t.Elem())
return exampleValue(t.Elem(), nil)
case reflect.Struct:
es := exampleStruct(t)
es := exampleStruct(t, parent)
v := reflect.ValueOf(es).Elem().Interface()
ExampleValues[t] = v
return v
case reflect.Array:
out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ {
out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem())))
out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
}
return out.Interface()
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct {
es := exampleStruct(t.Elem())
es := exampleStruct(t.Elem(), t)
//ExampleValues[t] = es
return es
}
@ -155,12 +159,15 @@ func exampleValue(t reflect.Type) interface{} {
panic(fmt.Sprintf("No example value for type: %s", t))
}
func exampleStruct(t reflect.Type) interface{} {
func exampleStruct(t, parent reflect.Type) interface{} {
ns := reflect.New(t)
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type == parent {
continue
}
if strings.Title(f.Name) == f.Name {
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type)))
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
}
}
@ -286,17 +293,17 @@ func main() {
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, exampleValue(inp))
args = append(args, exampleValue(inp, nil))
}
v, err := json.Marshal(args)
v, err := json.MarshalIndent(args, "", " ")
if err != nil {
panic(err)
}
outv := exampleValue(ft.Out(0))
outv := exampleValue(ft.Out(0), nil)
ov, err := json.Marshal(outv)
ov, err := json.MarshalIndent(outv, "", " ")
if err != nil {
panic(err)
}
@ -318,6 +325,15 @@ func main() {
return groupslice[i].GroupName < groupslice[j].GroupName
})
fmt.Printf("# Groups\n")
for _, g := range groupslice {
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
for _, method := range g.Methods {
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
}
}
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName)
@ -331,8 +347,17 @@ func main() {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
if strings.Count(m.InputExample, "\n") > 0 {
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
} else {
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
}
if strings.Count(m.ResponseExample, "\n") > 0 {
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
} else {
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
}
}
}
}
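The parent parameter threaded through exampleValue and exampleStruct above is a recursion guard: a struct field whose type matches the parent type is skipped, so example generation terminates on self-referential types. A standalone sketch of the same idea, using an invented Node type rather than any lotus type:

```go
package main

import (
	"fmt"
	"reflect"
)

type Node struct {
	Name  string
	Child *Node // would recurse forever without the parent check
}

func exampleValue(t, parent reflect.Type) interface{} {
	switch t.Kind() {
	case reflect.String:
		return "example"
	case reflect.Struct:
		v := reflect.New(t).Elem()
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			if f.Type == parent {
				continue // self-referential field: leave the zero value, stop recursing
			}
			v.Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
		}
		return v.Interface()
	case reflect.Ptr:
		if t.Elem().Kind() == reflect.Struct {
			inner := reflect.New(t.Elem())
			inner.Elem().Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
			return inner.Interface()
		}
		return reflect.Zero(t).Interface()
	default:
		return reflect.Zero(t).Interface()
	}
}

func main() {
	// Prints a Node whose Child points at a Node with a nil Child:
	// generation stopped one level down instead of looping forever.
	fmt.Printf("%+v\n", exampleValue(reflect.TypeOf(Node{}), nil))
}
```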


@ -8,6 +8,7 @@ import (
"math/rand"
"os"
"path/filepath"
"sync/atomic"
"testing"
"time"
@ -52,11 +53,11 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
}
time.Sleep(time.Second)
mine := true
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for mine {
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
@ -66,7 +67,7 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
makeDeal(t, ctx, 6, client, miner, carExport)
mine = false
atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining")
<-done
}
@ -89,12 +90,12 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
}
time.Sleep(time.Second)
mine := true
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for mine {
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
@ -105,7 +106,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
makeDeal(t, ctx, 6, client, miner, false)
makeDeal(t, ctx, 7, client, miner, false)
mine = false
atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining")
<-done
}
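The switch from a plain bool to an int64 read and written through sync/atomic removes a data race between the test goroutine and the mining goroutine. A minimal, self-contained sketch of the pattern (the test clears the flag with atomic.AddInt64(&mine, -1); StoreInt64 below is the more direct equivalent):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var mine int64 = 1
	done := make(chan struct{})

	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			// stand-in for sn[0].MineOne(...) in the test
			fmt.Println("mining one block")
			time.Sleep(50 * time.Millisecond)
		}
	}()

	time.Sleep(200 * time.Millisecond) // stand-in for makeDeal(...)
	atomic.StoreInt64(&mine, 0)        // signal the mining loop to stop
	<-done
	fmt.Println("shutting down mining")
}
```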


@ -126,6 +126,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
minedTwo := make(chan struct{})
go func() {
doneMinedTwo := false
defer close(done)
prevExpect := 0
@ -175,9 +176,9 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
time.Sleep(blocktime)
}
if prevExpect == 2 && expect == 2 && minedTwo != nil {
if prevExpect == 2 && expect == 2 && !doneMinedTwo {
close(minedTwo)
minedTwo = nil
doneMinedTwo = true
}
prevExpect = expect


@ -13,6 +13,10 @@ import (
)
func BuiltinBootstrap() ([]peer.AddrInfo, error) {
if DisableBuiltinAssets {
return nil, nil
}
var out []peer.AddrInfo
b := rice.MustFindBox("bootstrap")


@ -1,12 +1,12 @@
/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs
/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs
/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr
/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr
/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym
/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym
/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN
/ip4/86.109.15.55/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN
/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD
/ip4/139.178.84.41/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD
/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ
/ip4/136.144.49.131/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ
/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
/ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R

build/flags.go Normal file

@ -0,0 +1,15 @@
package build
// DisableBuiltinAssets disables the resolution of go.rice boxes that store
// built-in assets, such as proof parameters, bootstrap peers, genesis blocks,
// etc.
//
// When this value is set to true, it is expected that the user will
// provide any such configurations through the Lotus API itself.
//
// This is useful when you're using Lotus as a library, such as to orchestrate
// test scenarios, or for other purposes where you don't need to use the
// defaults shipped with the binary.
//
// For this flag to be effective, it must be enabled _before_ instantiating Lotus.
var DisableBuiltinAssets = false
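A sketch of how an embedding program might use this flag; the node construction itself is elided, the point being that the flag must be set before any component that reads built-in assets is instantiated:

```go
package main

import (
	"github.com/filecoin-project/lotus/build"
)

func init() {
	// Run Lotus as a library without the bundled bootstrap peers, genesis
	// and proof parameters; the host program supplies its own via the API.
	build.DisableBuiltinAssets = true
}

func main() {
	// ... construct and start the Lotus node here, after the flag is set ...
}
```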

Binary file not shown.


@ -121,4 +121,11 @@ const VerifSigCacheSize = 32000
const BlockMessageLimit = 512
const BlockGasLimit = 100_000_000_000
var DrandChain = `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`
var DrandConfig = dtypes.DrandConfig{
Servers: []string{
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
"https://pl-sin.testnet.drand.sh",
},
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`,
}
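Since NewDrandBeacon now takes a dtypes.DrandConfig (see the drand changes below), a library user can substitute their own drand network instead of the built-in testnet endpoints. A sketch with placeholder endpoint and chain-info values:

```go
package example

import (
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// customDrandConfig returns a drand configuration for a hypothetical private
// drand deployment. The URL and chain info below are placeholders.
func customDrandConfig() dtypes.DrandConfig {
	return dtypes.DrandConfig{
		Servers: []string{
			"https://drand.example.org", // placeholder endpoint
		},
		ChainInfoJSON: `{"public_key":"...","period":25,"genesis_time":0,"hash":"..."}`, // placeholder chain info
	}
}
```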


@ -12,11 +12,10 @@ import (
)
func init() {
power.ConsensusMinerMinPower = big.NewInt(1024 << 20)
power.ConsensusMinerMinPower = big.NewInt(1024 << 30)
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg512MiBV1: {},
abi.RegisteredSealProof_StackedDrg32GiBV1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1: {},
abi.RegisteredSealProof_StackedDrg32GiBV1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1: {},
}
}


@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
}
// APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 3, 0)
var APIVersion Version = newVer(0, 4, 0)
//nolint:varcheck,deadcode
const (


@ -17,6 +17,10 @@ type Response struct {
Err error
}
// RandomBeacon represents a system that provides randomness to Lotus.
// Other components interrogate the RandomBeacon to acquire randomness that's
// valid for a specific chain epoch, and to verify beacon entries that have
// been posted on chain.
type RandomBeacon interface {
Entry(context.Context, uint64) <-chan Response
VerifyEntry(types.BeaconEntry, types.BeaconEntry) error
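A small usage sketch for the interface above: requesting the beacon entry for a given round while honouring context cancellation. Only the Err field of Response is visible in this diff, and the helper below relies on nothing else:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/chain/beacon"
)

// fetchEntry asks the beacon for the entry at `round` and waits for either the
// response or context cancellation.
func fetchEntry(ctx context.Context, b beacon.RandomBeacon, round uint64) error {
	select {
	case resp := <-b.Entry(ctx, round):
		if resp.Err != nil {
			return fmt.Errorf("beacon entry %d: %w", round, resp.Err)
		}
		fmt.Printf("got beacon entry for round %d\n", round)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```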


@ -19,31 +19,15 @@ import (
logging "github.com/ipfs/go-log"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
var log = logging.Logger("drand")
var drandServers = []string{
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
"https://pl-sin.testnet.drand.sh",
}
var drandChain *dchain.Info
func init() {
var err error
drandChain, err = dchain.InfoFromJSON(bytes.NewReader([]byte(build.DrandChain)))
if err != nil {
panic("could not unmarshal chain info: " + err.Error())
}
}
type drandPeer struct {
addr string
tls bool
@ -57,6 +41,13 @@ func (dp *drandPeer) IsTLS() bool {
return dp.tls
}
// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
// We connect to drand peers via their public HTTP endpoints. The peers are
// enumerated in the Servers field of the dtypes.DrandConfig passed to
// NewDrandBeacon.
//
// The root trust for the Drand chain is configured from the ChainInfoJSON
// field of that configuration (build.DrandConfig by default).
type DrandBeacon struct {
client dclient.Client
@ -73,16 +64,21 @@ type DrandBeacon struct {
localCache map[uint64]types.BeaconEntry
}
func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub) (*DrandBeacon, error) {
func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) {
if genesisTs == 0 {
panic("what are you doing this cant be zero")
}
drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON)))
if err != nil {
return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err)
}
dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger(
log.SugaredLogger.Desugar(), zapcore.InfoLevel))
var clients []dclient.Client
for _, url := range drandServers {
for _, url := range config.Servers {
hc, err := hclient.NewWithInfo(url, drandChain, nil)
if err != nil {
return nil, xerrors.Errorf("could not create http drand client: %w", err)


@ -7,10 +7,13 @@ import (
dchain "github.com/drand/drand/chain"
hclient "github.com/drand/drand/client/http"
"github.com/stretchr/testify/assert"
"github.com/filecoin-project/lotus/build"
)
func TestPrintGroupInfo(t *testing.T) {
c, err := hclient.New(drandServers[0], nil, nil)
server := build.DrandConfig.Servers[0]
c, err := hclient.New(server, nil, nil)
assert.NoError(t, err)
cg := c.(interface {
FetchChainInfo(groupHash []byte) (*dchain.Info, error)


@ -10,6 +10,7 @@ import (
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -27,6 +28,24 @@ const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
const BlockSyncMaxRequestLength = 800
// BlockSyncService is the component that services BlockSync requests from
// peers.
//
// BlockSync is the basic chain synchronization protocol of Filecoin. BlockSync
// is an RPC-oriented protocol, with a single operation to request blocks.
//
// A request contains a start anchor block (referred to by CID) and the number
// of blocks requested beyond the anchor (including the anchor itself).
//
// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
// two options at the moment:
//
// - include block contents
// - include block messages
//
// The response will include a status code, an optional message, and the
// response payload in case of success. The payload is a slice of serialized
// tipsets.
type BlockSyncService struct {
cs *store.ChainStore
}


@ -64,6 +64,11 @@ func (bs *BlockSync) processStatus(req *BlockSyncRequest, res *BlockSyncResponse
}
}
// GetBlocks fetches count blocks from the network, from the provided tipset
// *backwards*, returning as many tipsets as count.
//
// {hint/usage}: This is used by the Syncer during normal chain syncing and when
// resolving forks.
func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
defer span.End()
@ -80,7 +85,9 @@ func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count i
Options: BSOptBlocks,
}
// this peerset is sorted by latency and failure counting.
peers := bs.getPeers()
// randomize the first few peers so we don't always pick the same peer
shufflePrefix(peers)
@ -356,6 +363,7 @@ func (bs *BlockSync) RemovePeer(p peer.ID) {
bs.syncPeers.removePeer(p)
}
// getPeers returns a preference-sorted set of peers to query.
func (bs *BlockSync) getPeers() []peer.ID {
return bs.syncPeers.prefSortedPeers()
}
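A usage sketch for GetBlocks, assuming the chain/blocksync import path and an already-constructed *BlockSync client; it mirrors the 500-tipset window that collectHeaders uses:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/chain/blocksync"
	"github.com/filecoin-project/lotus/chain/types"
)

// fetchWindow asks peers for up to 500 tipsets, walking backwards from `head`.
func fetchWindow(ctx context.Context, bs *blocksync.BlockSync, head types.TipSetKey) error {
	tipsets, err := bs.GetBlocks(ctx, head, 500)
	if err != nil {
		return fmt.Errorf("blocksync request failed: %w", err)
	}
	for _, ts := range tipsets {
		fmt.Println("got tipset at height", ts.Height())
	}
	return nil
}
```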


@ -84,6 +84,9 @@ type calledEvents struct {
}
func (e *calledEvents) headChangeCalled(rev, app []*types.TipSet) error {
e.lk.Lock()
defer e.lk.Unlock()
for _, ts := range rev {
e.handleReverts(ts)
e.at = ts.Height()
@ -134,7 +137,6 @@ func (e *calledEvents) checkNewCalls(ts *types.TipSet) {
e.messagesForTs(pts, func(msg *types.Message) {
// TODO: provide receipts
for tid, matchFns := range e.matchers {
var matched bool
for _, matchFn := range matchFns {


@ -26,12 +26,15 @@ type heightEvents struct {
}
func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
defer span.End()
span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
e.lk.Lock()
defer e.lk.Unlock()
for _, ts := range rev {
// TODO: log error if h below gcconfidence
// revert height-based triggers
@ -40,7 +43,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
for _, tid := range e.htHeights[h] {
ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
err := e.heightTriggers[tid].revert(ctx, ts)
rev := e.heightTriggers[tid].revert
e.lk.Unlock()
err := rev(ctx, ts)
e.lk.Lock()
e.heightTriggers[tid].called = false
span.End()
@ -98,8 +104,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "events.HeightApply")
span.AddAttributes(trace.BoolAttribute("immediate", false))
err = hnd.handle(ctx, incTs, h)
handle := hnd.handle
e.lk.Unlock()
err = handle(ctx, incTs, h)
e.lk.Lock()
span.End()
if err != nil {


@ -32,8 +32,11 @@ func (fts *FullTipSet) Cids() []cid.Cid {
return cids
}
// TipSet returns a narrower view of this FullTipSet, eliding the block
// messages.
func (fts *FullTipSet) TipSet() *types.TipSet {
if fts.tipset != nil {
// FIXME: fts.tipset is actually never set. Should it memoize?
return fts.tipset
}


@ -34,7 +34,7 @@ type lbEntry struct {
target types.TipSetKey
}
func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
if from.Height()-to <= ci.skipLength {
return ci.walkBack(from, to)
}


@ -52,6 +52,15 @@ var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
bs bstore.Blockstore
ds dstore.Datastore
@ -266,6 +275,9 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
return nil
}
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
@ -331,6 +343,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
return out
}
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
@ -368,6 +383,7 @@ func (cs *ChainStore) SetHead(ts *types.TipSet) error {
return cs.takeHeaviestTipSet(context.TODO(), ts)
}
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
has, err := cs.bs.Has(c)
@ -382,6 +398,8 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
return true, nil
}
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
sb, err := cs.bs.Get(c)
if err != nil {
@ -474,6 +492,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
return leftChain, rightChain, nil
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()


@ -53,6 +53,29 @@ var log = logging.Logger("chain")
var LocalIncoming = "incoming"
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn't deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct {
// The interface for accessing and putting tipsets into local storage
store *store.ChainStore
@ -85,6 +108,7 @@ type Syncer struct {
verifier ffiwrapper.Verifier
}
// NewSyncer creates a new Syncer object.
func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
@ -182,6 +206,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return true
}
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated into our view of the chain.
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10)
@ -209,11 +238,15 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe
return out, nil
}
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
}
// Collect the CIDs of both types of messages separately: BLS and Secpk.
var bcids, scids []cbg.CBORMarshaler
for _, m := range fblk.BlsMessages {
c := cbg.CborCid(m.Cid())
@ -231,11 +264,14 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
blockstore := syncer.store.Blockstore()
bs := cbor.NewCborStore(blockstore)
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(bs, bcids, scids)
if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
}
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
}
@ -345,6 +381,8 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
return fts, nil
}
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) {
ctx := context.TODO()
bmroot, err := amt.FromArray(ctx, bs, bmsgCids)
@ -368,14 +406,24 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (
return mrcid, nil
}
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (BlockSync) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil
}
// fall back to the network.
return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
}
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. Unless ALL blocks are found
// locally, it errors with blockstore.ErrNotFound.
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk)
if err != nil {
@ -400,6 +448,12 @@ func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet,
return fts, nil
}
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End()
@ -466,16 +520,27 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
return nil
}
var futures []async.ErrorFuture
for _, b := range fts.Blocks {
if err := syncer.ValidateBlock(ctx, b); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), err.Error())
}
return xerrors.Errorf("validating block %s: %w", b.Cid(), err)
}
b := b // rebind to a scoped variable
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), err.Error())
}
return xerrors.Errorf("validating block %s: %w", b.Cid(), err)
}
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
}
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
}
return nil
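The change above fans block validation out to futures and joins them with AwaitContext. The same fan-out-and-join shape, expressed with the standard golang.org/x/sync/errgroup package rather than the async helper used here (validateOne stands in for Syncer.ValidateBlock):

```go
package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// validateAll runs one validation per block concurrently and returns the
// first error, mirroring the futures/AwaitContext loop above.
func validateAll(ctx context.Context, blocks []string, validateOne func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, b := range blocks {
		b := b // rebind, as the diff does, so each goroutine sees its own block
		g.Go(func() error {
			return validateOne(ctx, b)
		})
	}
	return g.Wait()
}
```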
@ -993,6 +1058,39 @@ func extractSyncState(ctx context.Context) *SyncerState {
return nil
}
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `from` is the heaviest/projected/target tipset we have learned about, and
// `to` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks if portions of the chain are in our ChainStore; falling
// back to the network to retrieve the missing parts. If, during the process, any
// portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/naming}: `from` and `to` are in inverse order. `from` is the highest,
// and `to` is the lowest. This method traverses the chain backwards.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards; for each tipset:
// 3a. Load it from the chainstore; if found, move on to its parent.
// 3b. Query our peers via BlockSync in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End()
@ -1009,6 +1107,8 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to
}
}
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range from.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok {
markBad("linked to %s", pcid)
@ -1079,8 +1179,8 @@ loop:
}
// NB: GetBlocks validates that the blocks are in-fact the ones we
// requested, and that they are correctly linked to eachother. It does
// not validate any state transitions
// requested, and that they are correctly linked to one another. It does
// not validate any state transitions.
window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap
@ -1121,7 +1221,6 @@ loop:
at = blks[len(blks)-1].Parents()
}
// We have now ascertained that this is *not* a 'fast forward'
if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) {
last := blockSet[len(blockSet)-1]
if last.Parents() == to.Parents() {
@ -1129,6 +1228,8 @@ loop:
return blockSet, nil
}
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height())
fork, err := syncer.syncFork(ctx, last, to)
if err != nil {
@ -1150,6 +1251,12 @@ loop:
var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
// denylist. Otherwise, we find the common ancestor and add the missing chain
// fragment, up to the fork point, to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold))
if err != nil {
@ -1301,6 +1408,25 @@ func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error {
return nil
}
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
@ -1350,9 +1476,8 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
if build.InsecurePoStValidation {
return nil
} else {
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
func (syncer *Syncer) State() []SyncerState {
@ -1363,6 +1488,7 @@ func (syncer *Syncer) State() []SyncerState {
return out
}
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, "manually marked bad")
}
@ -1370,7 +1496,7 @@ func (syncer *Syncer) MarkBad(blk cid.Cid) {
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
return syncer.bad.Has(blk)
}
func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
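To make the traversal documented in the collectHeaders godoc above concrete, here is a minimal, self-contained sketch of the windowed backward walk; every name in it is an illustrative stand-in rather than a Lotus type, and only the 500-tipset window mirrors the code.

```go
// Sketch of collectHeaders' backward walk: prefer local tipsets (3a),
// otherwise fetch bounded batches from peers (3b). Illustrative only.
package main

import (
	"context"
	"fmt"
)

type tipset struct {
	height  int
	parents string // parent key, simplified to a single string
}

func collectBack(ctx context.Context,
	local func(key string) (*tipset, bool),
	fetch func(ctx context.Context, key string, count int) ([]*tipset, error),
	from *tipset, until int) ([]*tipset, error) {

	out := []*tipset{from}
	at := from.parents
	for out[len(out)-1].height > until {
		if ts, ok := local(at); ok { // 3a: found in our chainstore
			out = append(out, ts)
			at = ts.parents
			continue
		}
		window := 500 // 3b: cap each peer request at 500 tipsets
		if gap := out[len(out)-1].height - until; gap < window {
			window = gap
		}
		blks, err := fetch(ctx, at, window)
		if err != nil {
			return nil, err
		}
		if len(blks) == 0 {
			return nil, fmt.Errorf("peers returned no headers for %s", at)
		}
		out = append(out, blks...)
		at = blks[len(blks)-1].parents
	}
	return out, nil
}

func main() {
	// Fake chain: nothing is local, so everything comes from the fake peer
	// fetcher one tipset at a time.
	local := func(string) (*tipset, bool) { return nil, false }
	fetch := func(_ context.Context, key string, count int) ([]*tipset, error) {
		var h int
		fmt.Sscanf(key, "%d", &h)
		return []*tipset{{height: h, parents: fmt.Sprintf("%d", h-1)}}, nil
	}
	headers, err := collectBack(context.Background(), local, fetch, &tipset{height: 5, parents: "4"}, 0)
	fmt.Println(len(headers), err) // 6 <nil>
}
```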

View File

@ -76,7 +76,7 @@ func SizeStr(bi BigInt) string {
}
f, _ := r.Float64()
return fmt.Sprintf("%.3g %s", f, byteSizeUnits[i])
return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i])
}
var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"}

View File

@ -3,7 +3,12 @@ package types
import (
"bytes"
"math/big"
"math/rand"
"strings"
"testing"
"time"
"github.com/docker/go-units"
"github.com/stretchr/testify/assert"
)
@ -60,8 +65,10 @@ func TestSizeStr(t *testing.T) {
}{
{0, "0 B"},
{1, "1 B"},
{1016, "1016 B"},
{1024, "1 KiB"},
{2000, "1.95 KiB"},
{1000 * 1024, "1000 KiB"},
{2000, "1.953 KiB"},
{5 << 20, "5 MiB"},
{11 << 60, "11 EiB"},
}
@ -71,6 +78,22 @@ func TestSizeStr(t *testing.T) {
}
}
func TestSizeStrUnitsSymmetry(t *testing.T) {
s := rand.NewSource(time.Now().UnixNano())
r := rand.New(s)
for i := 0; i < 1000000; i++ {
n := r.Uint64()
l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "")
r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "")
assert.NotContains(t, l, "e+")
assert.NotContains(t, r, "e+")
assert.Equal(t, l, r, "wrong formatting for %d", n)
}
}
func TestSizeStrBig(t *testing.T) {
ZiB := big.NewInt(50000)
ZiB = ZiB.Lsh(ZiB, 70)

View File

@ -21,16 +21,16 @@ type ExecutionTrace struct {
type GasTrace struct {
Name string
Location []Loc
TotalGas int64
ComputeGas int64
StorageGas int64
TotalVirtualGas int64
VirtualComputeGas int64
VirtualStorageGas int64
Location []Loc `json:"loc"`
TotalGas int64 `json:"tg"`
ComputeGas int64 `json:"cg"`
StorageGas int64 `json:"sg"`
TotalVirtualGas int64 `json:"vtg"`
VirtualComputeGas int64 `json:"vcg"`
VirtualStorageGas int64 `json:"vsg"`
TimeTaken time.Duration
Extra interface{} `json:",omitempty"`
TimeTaken time.Duration `json:"tt"`
Extra interface{} `json:"ex,omitempty"`
Callers []uintptr `json:"-"`
}
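To see what the shortened JSON tags above buy on the wire, here is a small, hypothetical stand-in for GasTrace (not the real type) marshaled with those tags:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// gasTrace mirrors a few of the fields above; the untagged Name keeps its
// Go name, everything else gets the compact key.
type gasTrace struct {
	Name       string
	TotalGas   int64         `json:"tg"`
	ComputeGas int64         `json:"cg"`
	StorageGas int64         `json:"sg"`
	TimeTaken  time.Duration `json:"tt"`
}

func main() {
	b, _ := json.Marshal(gasTrace{Name: "OnIpldGet", TotalGas: 1500, ComputeGas: 1000, StorageGas: 500, TimeTaken: 42 * time.Microsecond})
	fmt.Println(string(b)) // {"Name":"OnIpldGet","tg":1500,"cg":1000,"sg":500,"tt":42000}
}
```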

View File

@ -186,6 +186,15 @@ func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte
}
func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
ps.chargeGas(newGasCharge("BatchVerifySeals", 0, 0)) // TODO: this is only called by the cron actor. Should we even charge gas?
var gasChargeSum GasCharge
gasChargeSum.Name = "BatchVerifySeals"
ps.chargeGas(gasChargeSum) // TODO: this is only called by the cron actor. Should we even charge gas?
for _, svis := range inp {
for _, svi := range svis {
ch := ps.pl.OnVerifySeal(svi)
ps.chargeGas(newGasCharge("BatchVerifySingle", 0, 0).WithVirtual(ch.VirtualCompute+ch.ComputeGas, 0))
}
}
return ps.under.BatchVerifySeals(inp)
}

View File

@ -103,17 +103,17 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M
if methodNum != builtin.MethodSend {
ret += pl.sendInvokeMethod
}
return newGasCharge("OnMethodInvocation", ret, 0)
return newGasCharge("OnMethodInvocation", ret, 0).WithVirtual(ret*15000, 0)
}
// OnIpldGet returns the gas used for retrieving an object
func (pl *pricelistV0) OnIpldGet(dataSize int) GasCharge {
return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0).WithExtra(dataSize)
return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0).WithExtra(dataSize).WithVirtual(pl.ipldGetBase*13750+(pl.ipldGetPerByte*100), 0)
}
// OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).WithExtra(dataSize)
return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).WithExtra(dataSize).WithVirtual(pl.ipldPutBase*8700+(pl.ipldPutPerByte*100), 0)
}
// OnCreateActor returns the gas used for creating an actor
@ -144,13 +144,13 @@ func (pl *pricelistV0) OnHashing(dataSize int) GasCharge {
// OnComputeUnsealedSectorCid
func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus
return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0)
return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0).WithVirtual(pl.computeUnsealedSectorCidBase*24500, 0)
}
// OnVerifySeal
func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0).WithVirtual(pl.verifySealBase*177500, 0)
}
// OnVerifyPost

View File

@ -253,7 +253,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
}
rt.ChargeGas(rt.Pricelist().OnCreateActor())
rt.chargeGas(rt.Pricelist().OnCreateActor())
err = rt.state.SetActor(address, &types.Actor{
Code: codeID,
@ -267,7 +267,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
}
func (rt *Runtime) DeleteActor(addr address.Address) {
rt.ChargeGas(rt.Pricelist().OnDeleteActor())
rt.chargeGas(rt.Pricelist().OnDeleteActor())
act, err := rt.state.GetActor(rt.Message().Receiver())
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
@ -408,7 +408,7 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
if subrt != nil {
rt.numActorsCreated = subrt.numActorsCreated
}
rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace) //&er)
rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
return ret, errSend
}
@ -496,7 +496,15 @@ func (rt *Runtime) finilizeGasTracing() {
}
}
func (rt *Runtime) ChargeGas(gas GasCharge) {
// ChargeGas is the gas-charging method exposed to specs-actors code
func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) {
err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1)
if err != nil {
panic(err)
}
}
func (rt *Runtime) chargeGas(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1)
if err != nil {
panic(err)

View File

@ -241,10 +241,12 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
return sigs.Verify(&sig, kaddr, input)
}
var BatchSealVerifyParallelism = goruntime.NumCPU()
func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool)
sema := make(chan struct{}, goruntime.NumCPU())
sema := make(chan struct{}, BatchSealVerifyParallelism)
var wg sync.WaitGroup
for addr, seals := range inp {
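The hunk above bounds seal-verification concurrency with a buffered channel used as a semaphore (BatchSealVerifyParallelism slots). A generic, self-contained sketch of that pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// expensiveVerify stands in for a CPU-bound seal verification.
func expensiveVerify(i int) bool { return i%2 == 0 }

func main() {
	parallelism := runtime.NumCPU()
	sema := make(chan struct{}, parallelism) // at most `parallelism` goroutines run the hot path
	var wg sync.WaitGroup

	results := make([]bool, 16)
	for i := range results {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sema <- struct{}{}        // acquire a slot
			defer func() { <-sema }() // release it when done
			results[i] = expensiveVerify(i)
		}(i)
	}
	wg.Wait()
	fmt.Println(results)
}
```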

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
@ -391,6 +392,10 @@ var clientRetrieveCmd = &cli.Command{
Name: "car",
Usage: "export to a car file instead of a regular file",
},
&cli.StringFlag{
Name: "miner",
Usage: "miner address for retrieval, if not present it'll use local discovery",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
@ -398,7 +403,7 @@ var clientRetrieveCmd = &cli.Command{
return nil
}
api, closer, err := GetFullNodeAPI(cctx)
fapi, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
@ -409,7 +414,7 @@ var clientRetrieveCmd = &cli.Command{
if cctx.String("address") != "" {
payer, err = address.NewFromString(cctx.String("address"))
} else {
payer, err = api.WalletDefaultAddress(ctx)
payer, err = fapi.WalletDefaultAddress(ctx)
}
if err != nil {
return err
@ -432,23 +437,39 @@ var clientRetrieveCmd = &cli.Command{
return nil
}*/ // TODO: fix
offers, err := api.ClientFindData(ctx, file)
if err != nil {
return err
var offer api.QueryOffer
minerStrAddr := cctx.String("miner")
if minerStrAddr == "" { // Local discovery
offers, err := fapi.ClientFindData(ctx, file)
if err != nil {
return err
}
// TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 {
fmt.Println("Failed to find file")
return nil
}
offer = offers[0]
} else { // Directed retrieval
minerAddr, err := address.NewFromString(minerStrAddr)
if err != nil {
return err
}
offer, err = fapi.ClientMinerQueryOffer(ctx, file, minerAddr)
if err != nil {
return err
}
}
// TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 {
fmt.Println("Failed to find file")
return nil
if offer.Err != "" {
return fmt.Errorf("The received offer errored: %s", offer.Err)
}
ref := &lapi.FileRef{
Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"),
}
if err := api.ClientRetrieve(ctx, offers[0].Order(payer), ref); err != nil {
if err := fapi.ClientRetrieve(ctx, offer.Order(payer), ref); err != nil {
return xerrors.Errorf("Retrieval Failed: %w", err)
}

View File

@ -4,8 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"runtime"
"runtime/pprof"
"sort"
"time"
@ -44,8 +47,14 @@ var importBenchCmd = &cli.Command{
Name: "height",
Usage: "halt validation after given height",
},
&cli.IntFlag{
Name: "batch-seal-verify-threads",
Usage: "set the parallelism factor for batch seal verification",
Value: runtime.NumCPU(),
},
},
Action: func(cctx *cli.Context) error {
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
if !cctx.Args().Present() {
fmt.Println("must pass car file of chain to benchmark importing")
return nil
@ -111,14 +120,22 @@ var importBenchCmd = &cli.Command{
ts = next
}
out := make([]TipSetExec, 0, len(tschain))
ibj, err := os.Create("import-bench.json")
if err != nil {
return err
}
defer ibj.Close() //nolint:errcheck
enc := json.NewEncoder(ibj)
var lastTse *TipSetExec
lastState := tschain[len(tschain)-1].ParentState()
for i := len(tschain) - 2; i >= 0; i-- {
cur := tschain[i]
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
if cur.ParentState() != lastState {
lastTrace := out[len(out)-1].Trace
lastTrace := lastTse.Trace
d, err := json.MarshalIndent(lastTrace, "", " ")
if err != nil {
panic(err)
@ -132,36 +149,98 @@ var importBenchCmd = &cli.Command{
if err != nil {
return err
}
out = append(out, TipSetExec{
stripCallers(trace)
lastTse = &TipSetExec{
TipSet: cur.Key(),
Trace: trace,
Duration: time.Since(start),
})
}
lastState = st
if err := enc.Encode(lastTse); err != nil {
return xerrors.Errorf("failed to write out tipsetexec: %w", err)
}
}
pprof.StopCPUProfile()
ibj, err := os.Create("import-bench.json")
if err != nil {
return err
}
defer ibj.Close() //nolint:errcheck
if err := json.NewEncoder(ibj).Encode(out); err != nil {
return err
}
return nil
},
}
func walkExecutionTrace(et *types.ExecutionTrace) {
for _, gc := range et.GasCharges {
gc.Callers = nil
}
for _, sub := range et.Subcalls {
walkExecutionTrace(&sub) //nolint:scopelint,gosec
}
}
func stripCallers(trace []*api.InvocResult) {
for _, t := range trace {
walkExecutionTrace(&t.ExecutionTrace)
}
}
type Invocation struct {
TipSet types.TipSetKey
Invoc *api.InvocResult
}
const GasPerNs = 10
func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
var cgas, vgas int64
for _, gc := range et.GasCharges {
cgas += gc.ComputeGas
vgas += gc.VirtualComputeGas
}
for _, sub := range et.Subcalls {
c, v := countGasCosts(&sub)
cgas += c
vgas += v
}
return cgas, vgas
}
func compStats(vals []float64) (float64, float64) {
var sum float64
for _, v := range vals {
sum += v
}
av := sum / float64(len(vals))
var varsum float64
for _, v := range vals {
delta := av - v
varsum += delta * delta
}
return av, math.Sqrt(varsum / float64(len(vals)))
}
func tallyGasCharges(charges map[string][]float64, et *types.ExecutionTrace) {
for _, gc := range et.GasCharges {
compGas := gc.ComputeGas + gc.VirtualComputeGas
ratio := float64(compGas) / float64(gc.TimeTaken.Nanoseconds())
charges[gc.Name] = append(charges[gc.Name], 1/(ratio/GasPerNs))
//fmt.Printf("%s: %d, %s: %0.2f\n", gc.Name, compGas, gc.TimeTaken, 1/(ratio/GasPerNs))
}
// Recurse into subcalls once, after tallying this trace's own charges.
for _, sub := range et.Subcalls {
tallyGasCharges(charges, &sub)
}
}
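A worked example of the delta tallyGasCharges records, with purely illustrative numbers: at the target GasPerNs of 10, a charge that took 1ms "should" cost 10,000,000 gas, so a 2,000,000-gas charge is recorded as needing roughly a 5x increase.

```go
package main

import "fmt"

const GasPerNs = 10 // target gas per nanosecond, as in the bench code above

func main() {
	// Illustrative numbers only: a charge of 2,000,000 compute gas that took 1ms.
	compGas := int64(2_000_000)
	tookNs := int64(1_000_000)
	ratio := float64(compGas) / float64(tookNs) // 2 gas/ns actually charged
	delta := 1 / (ratio / GasPerNs)             // = 5: the price would need a ~5x increase
	fmt.Println(delta)                          // 5
}
```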
var importAnalyzeCmd = &cli.Command{
Name: "analyze",
Action: func(cctx *cli.Context) error {
@ -176,10 +255,19 @@ var importAnalyzeCmd = &cli.Command{
}
var results []TipSetExec
if err := json.NewDecoder(fi).Decode(&results); err != nil {
return err
dec := json.NewDecoder(fi)
for {
var tse TipSetExec
if err := dec.Decode(&tse); err != nil {
if err != io.EOF {
return err
}
break
}
results = append(results, tse)
}
chargeDeltas := make(map[string][]float64)
var invocs []Invocation
var totalTime time.Duration
for i, r := range results {
@ -191,9 +279,29 @@ var importAnalyzeCmd = &cli.Command{
TipSet: r.TipSet,
Invoc: inv,
})
cgas, vgas := countGasCosts(&inv.ExecutionTrace)
fmt.Printf("Invocation: %d %s: %s %d -> %0.2f\n", inv.Msg.Method, inv.Msg.To, inv.Duration, cgas+vgas, float64(GasPerNs*inv.Duration.Nanoseconds())/float64(cgas+vgas))
tallyGasCharges(chargeDeltas, &inv.ExecutionTrace)
}
}
var keys []string
for k := range chargeDeltas {
keys = append(keys, k)
}
fmt.Println("Gas Price Deltas")
sort.Strings(keys)
for _, k := range keys {
vals := chargeDeltas[k]
av, stdev := compStats(vals)
fmt.Printf("%s: incr by %f (%f)\n", k, av, stdev)
}
sort.Slice(invocs, func(i, j int) bool {
return invocs[i].Invoc.Duration > invocs[j].Invoc.Duration
})
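The analyze command above reads its input as a stream of JSON values, decoding until io.EOF. A minimal, self-contained illustration of that pattern (the record type is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type record struct {
	Height int
}

func main() {
	// Two JSON values back to back, as produced by repeated Encoder.Encode calls.
	stream := strings.NewReader(`{"Height":1}
{"Height":2}
`)
	dec := json.NewDecoder(stream)
	var out []record
	for {
		var r record
		if err := dec.Decode(&r); err != nil {
			if err != io.EOF {
				panic(err)
			}
			break
		}
		out = append(out, r)
	}
	fmt.Println(out) // [{1} {2}]
}
```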

View File

@ -139,6 +139,10 @@ var sealBenchCmd = &cli.Command{
Name: "num-sectors",
Value: 1,
},
&cli.IntFlag{
Name: "parallel",
Value: 1,
},
},
Action: func(c *cli.Context) error {
if c.Bool("no-gpu") {
@ -235,7 +239,12 @@ var sealBenchCmd = &cli.Command{
if robench == "" {
var err error
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal"))
parCfg := ParCfg{
PreCommit1: c.Int("parallel"),
PreCommit2: 1,
Commit: 1,
}
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal"))
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
@ -307,7 +316,7 @@ var sealBenchCmd = &cli.Command{
return err
}
winnnigpost1 := time.Now()
winningpost1 := time.Now()
log.Info("computing winning post snark (hot)")
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
@ -331,7 +340,7 @@ var sealBenchCmd = &cli.Command{
log.Error("post verification failed")
}
verifyWinnnigPost1 := time.Now()
verifyWinningPost1 := time.Now()
pvi2 := abi.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
@ -398,10 +407,10 @@ var sealBenchCmd = &cli.Command{
verifyWindowpost2 := time.Now()
bo.PostGenerateCandidates = gencandidates.Sub(beforePost)
bo.PostWinningProofCold = winnnigpost1.Sub(gencandidates)
bo.PostWinningProofHot = winnningpost2.Sub(winnnigpost1)
bo.VerifyWinningPostCold = verifyWinnnigPost1.Sub(winnningpost2)
bo.VerifyWinningPostHot = verifyWinningPost2.Sub(verifyWinnnigPost1)
bo.PostWinningProofCold = winningpost1.Sub(gencandidates)
bo.PostWinningProofHot = winnningpost2.Sub(winningpost1)
bo.VerifyWinningPostCold = verifyWinningPost1.Sub(winnningpost2)
bo.VerifyWinningPostHot = verifyWinningPost2.Sub(verifyWinningPost1)
bo.PostWindowProofCold = windowpost1.Sub(verifyWinningPost2)
bo.PostWindowProofHot = windowpost2.Sub(windowpost1)
@ -432,10 +441,10 @@ var sealBenchCmd = &cli.Command{
}
if !c.Bool("skip-commit2") {
fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates))
fmt.Printf("compute winnnig post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winnnig post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winnnig post proof (cold): %s\n", bo.VerifyWinningPostCold)
fmt.Printf("verify winnnig post proof (hot): %s\n\n", bo.VerifyWinningPostHot)
fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold)
fmt.Printf("verify winning post proof (hot): %s\n\n", bo.VerifyWinningPostHot)
fmt.Printf("compute window post proof (cold): %s\n", bo.PostWindowProofCold)
fmt.Printf("compute window post proof (hot): %s\n", bo.PostWindowProofHot)
@ -447,9 +456,23 @@ var sealBenchCmd = &cli.Command{
},
}
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []abi.SectorInfo, error) {
var sealTimings []SealingResult
var sealedSectors []abi.SectorInfo
type ParCfg struct {
PreCommit1 int
PreCommit2 int
Commit int
}
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []abi.SectorInfo, error) {
var pieces []abi.PieceInfo
sealTimings := make([]SealingResult, numSectors)
sealedSectors := make([]abi.SectorInfo, numSectors)
preCommit2Sema := make(chan struct{}, par.PreCommit2)
commitSema := make(chan struct{}, par.Commit)
if numSectors%par.PreCommit1 != 0 {
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
}
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
sid := abi.SectorID{
@ -458,7 +481,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
}
start := time.Now()
log.Info("Writing piece into sector...")
log.Infof("[%d] Writing piece into sector...", i)
r := rand.New(rand.NewSource(100 + int64(i)))
@ -467,129 +490,168 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, mid
return nil, nil, err
}
addpiece := time.Now()
pieces = append(pieces, pi)
trand := blake2b.Sum256(ticketPreimage)
ticket := abi.SealRandomness(trand[:])
sealTimings[i-1].AddPiece = time.Since(start)
}
log.Info("Running replication(1)...")
pieces := []abi.PieceInfo{pi}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
if err != nil {
return nil, nil, xerrors.Errorf("commit: %w", err)
}
sectorsPerWorker := numSectors / par.PreCommit1
precommit1 := time.Now()
errs := make(chan error, par.PreCommit1)
for wid := 0; wid < par.PreCommit1; wid++ {
go func(worker int) {
sealerr := func() error {
start := 1 + (worker * sectorsPerWorker)
end := start + sectorsPerWorker
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
ix := int(i - 1)
sid := abi.SectorID{
Miner: mid,
Number: i,
}
log.Info("Running replication(2)...")
cids, err := sb.SealPreCommit2(context.TODO(), sid, pc1o)
if err != nil {
return nil, nil, xerrors.Errorf("commit: %w", err)
}
start := time.Now()
precommit2 := time.Now()
trand := blake2b.Sum256(ticketPreimage)
ticket := abi.SealRandomness(trand[:])
sealedSectors = append(sealedSectors, abi.SectorInfo{
SealProof: sb.SealProofType(),
SectorNumber: i,
SealedCID: cids.Sealed,
})
log.Infof("[%d] Running replication(1)...", i)
pieces := []abi.PieceInfo{pieces[ix]}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
if err != nil {
return xerrors.Errorf("commit: %w", err)
}
seed := lapi.SealSeed{
Epoch: 101,
Value: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255},
}
precommit1 := time.Now()
log.Info("Generating PoRep for sector (1)")
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
preCommit2Sema <- struct{}{}
pc2Start := time.Now()
log.Infof("[%d] Running replication(2)...", i)
cids, err := sb.SealPreCommit2(context.TODO(), sid, pc1o)
if err != nil {
return xerrors.Errorf("commit: %w", err)
}
precommit2 := time.Now()
<-preCommit2Sema
sealedSectors[ix] = abi.SectorInfo{
SealProof: sb.SealProofType(),
SectorNumber: i,
SealedCID: cids.Sealed,
}
seed := lapi.SealSeed{
Epoch: 101,
Value: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255},
}
commitSema <- struct{}{}
commitStart := time.Now()
log.Infof("[%d] Generating PoRep for sector (1)", i)
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
if err != nil {
return err
}
sealcommit1 := time.Now()
log.Infof("[%d] Generating PoRep for sector (2)", i)
if saveC2inp != "" {
c2in := Commit2In{
SectorNum: int64(i),
Phase1Out: c1o,
SectorSize: uint64(sectorSize),
}
b, err := json.Marshal(&c2in)
if err != nil {
return err
}
if err := ioutil.WriteFile(saveC2inp, b, 0664); err != nil {
log.Warnf("%+v", err)
}
}
var proof storage.Proof
if !skipc2 {
proof, err = sb.SealCommit2(context.TODO(), sid, c1o)
if err != nil {
return err
}
}
sealcommit2 := time.Now()
<-commitSema
if !skipc2 {
svi := abi.SealVerifyInfo{
SectorID: abi.SectorID{Miner: mid, Number: i},
SealedCID: cids.Sealed,
SealProof: sb.SealProofType(),
Proof: proof,
DealIDs: nil,
Randomness: ticket,
InteractiveRandomness: seed.Value,
UnsealedCID: cids.Unsealed,
}
ok, err := ffiwrapper.ProofVerifier.VerifySeal(svi)
if err != nil {
return err
}
if !ok {
return xerrors.Errorf("porep proof for sector %d was invalid", i)
}
}
verifySeal := time.Now()
if !skipunseal {
log.Infof("[%d] Unsealing sector", i)
{
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, stores.FTUnsealed, stores.FTNone, true)
if err != nil {
return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
}
done()
if err := os.Remove(p.Unsealed); err != nil {
return xerrors.Errorf("removing unsealed sector: %w", err)
}
}
err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
if err != nil {
return err
}
}
unseal := time.Now()
sealTimings[ix].PreCommit1 = precommit1.Sub(start)
sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start)
sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart)
sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1)
sealTimings[ix].Verify = verifySeal.Sub(sealcommit2)
sealTimings[ix].Unseal = unseal.Sub(verifySeal)
}
return nil
}()
if sealerr != nil {
errs <- sealerr
return
}
errs <- nil
}(wid)
}
for i := 0; i < par.PreCommit1; i++ {
err := <-errs
if err != nil {
return nil, nil, err
}
sealcommit1 := time.Now()
log.Info("Generating PoRep for sector (2)")
if saveC2inp != "" {
c2in := Commit2In{
SectorNum: int64(i),
Phase1Out: c1o,
SectorSize: uint64(sectorSize),
}
b, err := json.Marshal(&c2in)
if err != nil {
return nil, nil, err
}
if err := ioutil.WriteFile(saveC2inp, b, 0664); err != nil {
log.Warnf("%+v", err)
}
}
var proof storage.Proof
if !skipc2 {
proof, err = sb.SealCommit2(context.TODO(), sid, c1o)
if err != nil {
return nil, nil, err
}
}
sealcommit2 := time.Now()
if !skipc2 {
svi := abi.SealVerifyInfo{
SectorID: abi.SectorID{Miner: mid, Number: i},
SealedCID: cids.Sealed,
SealProof: sb.SealProofType(),
Proof: proof,
DealIDs: nil,
Randomness: ticket,
InteractiveRandomness: seed.Value,
UnsealedCID: cids.Unsealed,
}
ok, err := ffiwrapper.ProofVerifier.VerifySeal(svi)
if err != nil {
return nil, nil, err
}
if !ok {
return nil, nil, xerrors.Errorf("porep proof for sector %d was invalid", i)
}
}
verifySeal := time.Now()
if !skipunseal {
log.Info("Unsealing sector")
{
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, stores.FTUnsealed, stores.FTNone, true)
if err != nil {
return nil, nil, xerrors.Errorf("acquire unsealed sector for removing: %w", err)
}
done()
if err := os.Remove(p.Unsealed); err != nil {
return nil, nil, xerrors.Errorf("removing unsealed sector: %w", err)
}
}
err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
if err != nil {
return nil, nil, err
}
}
unseal := time.Now()
sealTimings = append(sealTimings, SealingResult{
AddPiece: addpiece.Sub(start),
PreCommit1: precommit1.Sub(addpiece),
PreCommit2: precommit2.Sub(precommit1),
Commit1: sealcommit1.Sub(precommit2),
Commit2: sealcommit2.Sub(sealcommit1),
Verify: verifySeal.Sub(sealcommit2),
Unseal: unseal.Sub(verifySeal),
})
}
return sealTimings, sealedSectors, nil

View File

@ -118,7 +118,7 @@ create unique index if not exists block_cid_uindex
create materialized view if not exists state_heights
as select distinct height, parentstateroot from blocks;
create unique index if not exists state_heights_uindex
create index if not exists state_heights_index
on state_heights (height);
create index if not exists state_heights_height_index

View File

@ -53,6 +53,7 @@ type minerKey struct {
addr address.Address
act types.Actor
stateroot cid.Cid
tsKey types.TipSetKey
}
type minerInfo struct {
@ -66,10 +67,11 @@ type minerInfo struct {
type actorInfo struct {
stateroot cid.Cid
tsKey types.TipSetKey
state string
}
func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipSet, maxBatch int) {
func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.TipSet, maxBatch int) {
var alk sync.Mutex
log.Infof("Getting synced block list")
@ -81,7 +83,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
allToSync := map[cid.Cid]*types.BlockHeader{}
toVisit := list.New()
for _, header := range ts.Blocks() {
for _, header := range headTs.Blocks() {
toVisit.PushBack(header)
}
@ -116,7 +118,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
for len(allToSync) > 0 {
actors := map[address.Address]map[types.Actor]actorInfo{}
addresses := map[address.Address]address.Address{}
addressToID := map[address.Address]address.Address{}
minH := abi.ChainEpoch(math.MaxInt64)
for _, header := range allToSync {
@ -129,7 +131,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
for c, header := range allToSync {
if header.Height < minH+abi.ChainEpoch(maxBatch) {
toSync[c] = header
addresses[header.Miner] = address.Undef
addressToID[header.Miner] = address.Undef
}
}
for c := range toSync {
@ -146,20 +148,20 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
}
if len(bh.Parents) == 0 { // genesis case
ts, _ := types.NewTipSet([]*types.BlockHeader{bh})
aadrs, err := api.StateListActors(ctx, ts.Key())
genesisTs, _ := types.NewTipSet([]*types.BlockHeader{bh})
aadrs, err := api.StateListActors(ctx, genesisTs.Key())
if err != nil {
log.Error(err)
return
}
parmap.Par(50, aadrs, func(addr address.Address) {
act, err := api.StateGetActor(ctx, addr, ts.Key())
act, err := api.StateGetActor(ctx, addr, genesisTs.Key())
if err != nil {
log.Error(err)
return
}
ast, err := api.StateReadState(ctx, act, ts.Key())
ast, err := api.StateReadState(ctx, act, genesisTs.Key())
if err != nil {
log.Error(err)
return
@ -177,9 +179,10 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
}
actors[addr][*act] = actorInfo{
stateroot: bh.ParentStateRoot,
tsKey: genesisTs.Key(),
state: string(state),
}
addresses[addr] = address.Undef
addressToID[addr] = address.Undef
alk.Unlock()
})
@ -206,11 +209,13 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Error(err)
return
}
ast, err := api.StateReadState(ctx, &act, pts.Key())
if err != nil {
log.Error(err)
return
}
state, err := json.Marshal(ast.State)
if err != nil {
log.Error(err)
@ -225,8 +230,9 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
actors[addr][act] = actorInfo{
stateroot: bh.ParentStateRoot,
state: string(state),
tsKey: pts.Key(),
}
addresses[addr] = address.Undef
addressToID[addr] = address.Undef
alk.Unlock()
}
})
@ -238,18 +244,20 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Infof("Resolving addresses")
for _, message := range msgs {
addresses[message.To] = address.Undef
addresses[message.From] = address.Undef
addressToID[message.To] = address.Undef
addressToID[message.From] = address.Undef
}
parmap.Par(50, parmap.KMapArr(addresses), func(addr address.Address) {
parmap.Par(50, parmap.KMapArr(addressToID), func(addr address.Address) {
// FIXME: cannot use EmptyTSK here since actor IDs can change during reorgs; need to use the corresponding tipset.
// TODO: figure out a way to get the corresponding tipset...
raddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK)
if err != nil {
log.Warn(err)
return
}
alk.Lock()
addresses[addr] = raddr
addressToID[addr] = raddr
alk.Unlock()
})
@ -267,6 +275,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
addr: addr,
act: actor,
stateroot: c.stateroot,
tsKey: c.tsKey,
}] = &minerInfo{}
}
}
@ -274,14 +283,17 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
parmap.Par(50, parmap.KVMapArr(miners), func(it func() (minerKey, *minerInfo)) {
k, info := it()
pow, err := api.StateMinerPower(ctx, k.addr, types.EmptyTSK)
// TODO: get the storage power actor's state and pull the miner power from there; currently this hits the
// storage power actor once for each miner for each tipset, we can do better by just getting it for each tipset
// and reading each miner power from the result.
pow, err := api.StateMinerPower(ctx, k.addr, k.tsKey)
if err != nil {
log.Error(err)
// Not sure why this would fail, but it's probably worth continuing
}
info.power = pow.MinerPower.QualityAdjPower
sszs, err := api.StateMinerSectorCount(ctx, k.addr, types.EmptyTSK)
sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
if err != nil {
log.Error(err)
return
@ -316,7 +328,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Info("Storing address mapping")
if err := st.storeAddressMap(addresses); err != nil {
if err := st.storeAddressMap(addressToID); err != nil {
log.Error(err)
return
}
@ -361,7 +373,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
log.Infof("Get deals")
// TODO: incremental, gather expired
deals, err := api.StateMarketDeals(ctx, ts.Key())
deals, err := api.StateMarketDeals(ctx, headTs.Key())
if err != nil {
log.Error(err)
return

View File

@ -3,11 +3,14 @@ package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"syscall"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
@ -117,11 +120,19 @@ var runCmd = &cli.Command{
}
// Connect to storage-miner
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return xerrors.Errorf("getting miner api: %w", err)
var nodeApi api.StorageMiner
var closer func()
var err error
for {
nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx)
if err == nil {
break
}
fmt.Printf("\r\x1b[0KConnecting to miner API... (%s)", err)
time.Sleep(time.Second)
continue
}
defer closer()
ctx := lcli.ReqContext(cctx)
ctx, cancel := context.WithCancel(ctx)
@ -136,6 +147,8 @@ var runCmd = &cli.Command{
}
log.Infof("Remote version %s", v)
watchMinerConn(ctx, cctx, nodeApi)
// Check params
act, err := nodeApi.ActorAddress(ctx)
@ -317,3 +330,42 @@ var runCmd = &cli.Command{
return srv.Serve(nl)
},
}
func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageMiner) {
go func() {
closing, err := nodeApi.Closing(ctx)
if err != nil {
log.Errorf("failed to get remote closing channel: %+v", err)
}
select {
case <-closing:
case <-ctx.Done():
}
if ctx.Err() != nil {
return // graceful shutdown
}
log.Warnf("Connection with miner node lost, restarting")
exe, err := os.Executable()
if err != nil {
log.Errorf("getting executable for auto-restart: %+v", err)
}
log.Sync()
// TODO: there are probably cleaner/more graceful ways to restart,
// but this is good enough for now (FSM can recover from the mess this creates)
if err := syscall.Exec(exe, []string{exe, "run",
fmt.Sprintf("--address=%s", cctx.String("address")),
fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")),
fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")),
fmt.Sprintf("--precommit2=%t", cctx.Bool("precommit2")),
fmt.Sprintf("--commit=%t", cctx.Bool("commit")),
}, os.Environ()); err != nil {
fmt.Println(err)
}
}()
}
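watchMinerConn above restarts the worker by replacing the current process image via syscall.Exec once the miner API connection is lost. A stripped-down, Unix-only sketch of that trick (the helper name is illustrative):

```go
package main

import (
	"log"
	"os"
	"syscall"
)

// restartSelf re-executes the current binary with the given extra arguments.
// Nothing after a successful Exec runs: the process image is replaced.
func restartSelf(extraArgs ...string) {
	exe, err := os.Executable()
	if err != nil {
		log.Fatalf("finding executable: %v", err)
	}
	args := append([]string{exe}, extraArgs...)
	if err := syscall.Exec(exe, args, os.Environ()); err != nil {
		log.Fatalf("exec: %v", err)
	}
}

func main() {
	// Calling restartSelf here would re-execute this program forever, so we
	// only show the helper; the worker calls it when its connection drops.
	_ = restartSelf
}
```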

View File

@ -88,7 +88,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
return nil, nil, xerrors.Errorf("commit: %w", err)
}
if err := sb.FinalizeSector(context.TODO(), sid); err != nil {
if err := sb.FinalizeSector(context.TODO(), sid, nil); err != nil {
return nil, nil, xerrors.Errorf("trim cache: %w", err)
}

View File

@ -0,0 +1,94 @@
package main
import (
"fmt"
ma "github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
var actorCmd = &cli.Command{
Name: "actor",
Usage: "manipulate the miner actor",
Subcommands: []*cli.Command{
actorSetAddrsCmd,
},
}
var actorSetAddrsCmd = &cli.Command{
Name: "set-addrs",
Usage: "set addresses that your miner can be publically dialed on",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "gas-limit",
Usage: "set gas limit",
Value: 100000,
},
},
Action: func(cctx *cli.Context) error {
nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
var addrs []abi.Multiaddrs
for _, a := range cctx.Args().Slice() {
maddr, err := ma.NewMultiaddr(a)
if err != nil {
return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err)
}
addrs = append(addrs, maddr.Bytes())
}
maddr, err := nodeAPI.ActorAddress(ctx)
if err != nil {
return err
}
minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
if err != nil {
return err
}
gasLimit := cctx.Int64("gas-limit")
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
To: maddr,
From: minfo.Worker,
Value: types.NewInt(0),
GasPrice: types.NewInt(1),
GasLimit: gasLimit,
Method: 18,
Params: params,
})
if err != nil {
return err
}
fmt.Printf("Requested multiaddrs change in message %s\n", smsg.Cid())
return nil
},
}
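For context on what set-addrs puts on chain: each argument is parsed with go-multiaddr, and it is the binary form that ends up in ChangeMultiaddrsParams.NewMultiaddrs. A tiny illustration with a made-up address:

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Example address only; substitute the miner's real public endpoint.
	addr, err := ma.NewMultiaddr("/ip4/203.0.113.7/tcp/1347")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d raw bytes would go into the actor update: %x\n", len(addr.Bytes()), addr.Bytes())
}
```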

View File

@ -12,6 +12,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
sealing "github.com/filecoin-project/storage-fsm"
"github.com/filecoin-project/lotus/api"
@ -119,16 +120,20 @@ var infoCmd = &cli.Command{
faultyPercentage)
}
expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000
if expWinChance > 0 {
if expWinChance > 1 {
expWinChance = 1
}
winRate := time.Duration(float64(time.Second*build.BlockDelay) / expWinChance)
winPerDay := float64(time.Hour*24) / float64(winRate)
if pow.MinerPower.RawBytePower.LessThan(power.ConsensusMinerMinPower) {
fmt.Print("Below minimum power threshold, no blocks will be won")
} else {
expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000
if expWinChance > 0 {
if expWinChance > 1 {
expWinChance = 1
}
winRate := time.Duration(float64(time.Second*build.BlockDelay) / expWinChance)
winPerDay := float64(time.Hour*24) / float64(winRate)
fmt.Print("Expected block win rate: ")
color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second))
fmt.Print("Expected block win rate: ")
color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second))
}
}
fmt.Println()
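A worked example of the win-rate arithmetic above, using purely illustrative inputs: 1% of quality-adjusted power, an assumed 30-second block delay, and 5 expected blocks per epoch (the real values come from the chain state and build constants).

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const blockDelay = 30 * time.Second // assumed for illustration, not the build constant
	const blocksPerEpoch = 5            // assumed expected blocks per epoch
	qaShare := 0.01                     // miner holds 1% of network QA power

	expWinChance := qaShare * blocksPerEpoch // 0.05 expected wins per epoch
	winRate := time.Duration(float64(blockDelay) / expWinChance)
	winPerDay := float64(24*time.Hour) / float64(winRate)
	fmt.Printf("one win every %s, %.1f wins/day\n", winRate, winPerDay) // 10m0s, 144.0
}
```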

View File

@ -460,7 +460,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
}
if cerr != nil {
return xerrors.Errorf("failed to configure storage miner: %w", err)
return xerrors.Errorf("failed to configure storage miner: %w", cerr)
}
}

View File

@ -22,7 +22,9 @@ func main() {
lotuslog.SetupLogLevels()
local := []*cli.Command{
dealsCmd,
actorCmd,
storageDealsCmd,
retrievalDealsCmd,
infoCmd,
initCmd,
rewardsCmd,
@ -30,7 +32,6 @@ func main() {
stopCmd,
sectorsCmd,
storageCmd,
setPriceCmd,
workersCmd,
provingCmd,
}

View File

@ -1,15 +1,55 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"text/tabwriter"
"time"
"github.com/docker/go-units"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
)
var CidBaseFlag = cli.StringFlag{
Name: "cid-base",
Hidden: true,
Value: "base32",
Usage: "Multibase encoding used for version 1 CIDs in output.",
DefaultText: "base32",
}
// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or
// the default (Base32) encoder if not.
func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
val := cctx.String("cid-base")
e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)}
if val != "" {
var err error
e.Base, err = multibase.EncoderByName(val)
if err != nil {
return e, err
}
}
return e, nil
}
var enableCmd = &cli.Command{
Name: "enable",
Usage: "Configure the miner to consider storage deal proposals",
@ -40,40 +80,156 @@ var disableCmd = &cli.Command{
},
}
var setPriceCmd = &cli.Command{
Name: "set-price",
Usage: "Set price that miner will accept storage deals at (FIL / GiB / Epoch)",
Flags: []cli.Flag{},
var setAskCmd = &cli.Command{
Name: "set-ask",
Usage: "Configure the miner's ask",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "price",
Usage: "Set the price of the ask (specified as FIL / GiB / Epoch) to `PRICE`",
Required: true,
},
&cli.StringFlag{
Name: "duration",
Usage: "Set duration of ask (a quantity of time after which the ask expires) `DURATION`",
DefaultText: "720h0m0s",
Value: "720h0m0s",
},
&cli.StringFlag{
Name: "min-piece-size",
Usage: "Set minimum piece size (w/bit-padding, in bytes) in ask to `SIZE`",
DefaultText: "256B",
Value: "256B",
},
&cli.StringFlag{
Name: "max-piece-size",
Usage: "Set maximum piece size (w/bit-padding, in bytes) in ask to `SIZE`",
DefaultText: "miner sector size",
},
},
Action: func(cctx *cli.Context) error {
ctx := lcli.DaemonContext(cctx)
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.DaemonContext(cctx)
pri := types.NewInt(cctx.Uint64("price"))
if !cctx.Args().Present() {
return fmt.Errorf("must specify price to set")
dur, err := time.ParseDuration(cctx.String("duration"))
if err != nil {
return xerrors.Errorf("cannot parse duration: %w", err)
}
fp, err := types.ParseFIL(cctx.Args().First())
qty := dur.Seconds() / build.BlockDelay
min, err := units.RAMInBytes(cctx.String("min-piece-size"))
if err != nil {
return xerrors.Errorf("cannot parse min-piece-size to quantity of bytes: %w", err)
}
if min < 256 {
return xerrors.New("minimum piece size (w/bit-padding) is 256B")
}
max, err := units.RAMInBytes(cctx.String("max-piece-size"))
if err != nil {
return xerrors.Errorf("cannot parse max-piece-size to quantity of bytes: %w", err)
}
maddr, err := api.ActorAddress(ctx)
if err != nil {
return err
}
return api.MarketSetPrice(ctx, types.BigInt(fp))
ssize, err := api.ActorSectorSize(ctx, maddr)
if err != nil {
return err
}
smax := int64(ssize)
if max == 0 {
max = smax
}
if max > smax {
return xerrors.Errorf("max piece size (w/bit-padding) %s cannot exceed miner sector size %s", types.SizeStr(types.NewInt(uint64(max))), types.SizeStr(types.NewInt(uint64(smax))))
}
return api.MarketSetAsk(ctx, pri, abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max))
},
}
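A worked example of the duration-to-epochs conversion performed by set-ask above, assuming a hypothetical 30-second block delay (the actual divisor is build.BlockDelay):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const blockDelay = 30.0 // seconds per epoch, assumed for illustration
	dur, _ := time.ParseDuration("720h0m0s")
	epochs := dur.Seconds() / blockDelay
	fmt.Printf("an ask lasting %s spans %.0f epochs\n", dur, epochs) // 86400 epochs
}
```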
var dealsCmd = &cli.Command{
Name: "deals",
Usage: "interact with your deals",
var getAskCmd = &cli.Command{
Name: "get-ask",
Usage: "Print the miner's ask",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
ctx := lcli.DaemonContext(cctx)
fnapi, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
sask, err := smapi.MarketGetAsk(ctx)
if err != nil {
return err
}
var ask *storagemarket.StorageAsk
if sask != nil && sask.Ask != nil {
ask = sask.Ask
}
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
fmt.Fprintf(w, "Price per GiB / Epoch\tMin. Piece Size (w/bit-padding)\tMax. Piece Size (w/bit-padding)\tExpiry (Epoch)\tExpiry (Appx. Rem. Time)\tSeq. No.\n")
if ask == nil {
fmt.Fprintf(w, "<miner does not have an ask>\n")
return w.Flush()
}
head, err := fnapi.ChainHead(ctx)
if err != nil {
return err
}
dlt := ask.Expiry - head.Height()
rem := "<expired>"
if dlt > 0 {
rem = (time.Second * time.Duration(dlt*build.BlockDelay)).String()
}
fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\t%d\n", ask.Price, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo)
return w.Flush()
},
}
var storageDealsCmd = &cli.Command{
Name: "storage-deals",
Usage: "Manage storage deals and related configuration",
Subcommands: []*cli.Command{
dealsImportDataCmd,
dealsListCmd,
enableCmd,
disableCmd,
setAskCmd,
getAskCmd,
setBlocklistCmd,
getBlocklistCmd,
resetBlocklistCmd,
},
}
@ -132,3 +288,96 @@ var dealsListCmd = &cli.Command{
return nil
},
}
var getBlocklistCmd = &cli.Command{
Name: "get-blocklist",
Usage: "List the contents of the storage miner's piece CID blocklist",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
blocklist, err := api.DealsPieceCidBlocklist(lcli.DaemonContext(cctx))
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
for idx := range blocklist {
fmt.Println(encoder.Encode(blocklist[idx]))
}
return nil
},
}
var setBlocklistCmd = &cli.Command{
Name: "set-blocklist",
Usage: "Set the storage miner's list of blocklisted piece CIDs",
ArgsUsage: "[<path-of-file-containing-newline-delimited-piece-CIDs> (optional, will read from stdin if omitted)]",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
scanner := bufio.NewScanner(os.Stdin)
if cctx.Args().Present() && cctx.Args().First() != "-" {
absPath, err := filepath.Abs(cctx.Args().First())
if err != nil {
return err
}
file, err := os.Open(absPath)
if err != nil {
log.Fatal(err)
}
defer file.Close() //nolint:errcheck
scanner = bufio.NewScanner(file)
}
var blocklist []cid.Cid
for scanner.Scan() {
decoded, err := cid.Decode(scanner.Text())
if err != nil {
return err
}
blocklist = append(blocklist, decoded)
}
err = scanner.Err()
if err != nil {
return err
}
return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), blocklist)
},
}
var resetBlocklistCmd = &cli.Command{
Name: "reset-blocklist",
Usage: "Remove all entries from the storage miner's piece CID blocklist",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), []cid.Cid{})
},
}

View File

@ -26,6 +26,75 @@ var provingCmd = &cli.Command{
Subcommands: []*cli.Command{
provingInfoCmd,
provingDeadlinesCmd,
provingFaultsCmd,
},
}
var provingFaultsCmd = &cli.Command{
Name: "faults",
Usage: "View the currently known proving faulty sectors information",
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return xerrors.Errorf("getting actor address: %w", err)
}
var mas miner.State
{
mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
rmas, err := api.ChainReadObj(ctx, mact.Head)
if err != nil {
return err
}
if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
return err
}
}
faults, err := mas.Faults.All(100000000000)
if err != nil {
return err
}
if len(faults) == 0 {
fmt.Println("no faulty sectors")
}
head, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
deadlines, err := api.StateMinerDeadlines(ctx, maddr, head.Key())
if err != nil {
return xerrors.Errorf("getting miner deadlines: %w", err)
}
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tsectors")
for deadline, sectors := range deadlines.Due {
intersectSectors, _ := bitfield.IntersectBitField(sectors, mas.Faults)
if intersectSectors != nil {
allSectors, _ := intersectSectors.All(100000000000)
for _, num := range allSectors {
_, _ = fmt.Fprintf(tw, "%d\t%d\n", deadline, num)
}
}
}
return tw.Flush()
},
}
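Conceptually, `proving faults` intersects each deadline's due sectors with the miner's fault set. A minimal, stand-alone illustration using plain slices (the real code operates on go-bitfield bitfields):

```go
package main

import "fmt"

// intersect returns the sector numbers present in both inputs.
func intersect(a, b []uint64) []uint64 {
	in := make(map[uint64]bool, len(a))
	for _, n := range a {
		in[n] = true
	}
	var out []uint64
	for _, n := range b {
		if in[n] {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	due := []uint64{1, 2, 3, 4} // sectors due in some deadline
	faulty := []uint64{3, 4, 9} // sectors currently marked faulty
	fmt.Println(intersect(due, faulty)) // [3 4]
}
```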

View File

@ -0,0 +1,45 @@
package main
import (
lcli "github.com/filecoin-project/lotus/cli"
"github.com/urfave/cli/v2"
)
var retrievalDealsCmd = &cli.Command{
Name: "retrieval-deals",
Usage: "Manage retrieval deals and related configuration",
Subcommands: []*cli.Command{
enableRetrievalCmd,
disableRetrievalCmd,
},
}
var enableRetrievalCmd = &cli.Command{
Name: "enable",
Usage: "Configure the miner to consider retrieval deal proposals",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), true)
},
}
var disableRetrievalCmd = &cli.Command{
Name: "disable",
Usage: "Configure the miner to reject all retrieval deal proposals",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), false)
},
}

View File

@ -27,6 +27,7 @@ var sectorsCmd = &cli.Command{
sectorsRefsCmd,
sectorsUpdateCmd,
sectorsPledgeCmd,
sectorsRemoveCmd,
},
}
@ -46,8 +47,9 @@ var sectorsPledgeCmd = &cli.Command{
}
var sectorsStatusCmd = &cli.Command{
Name: "status",
Usage: "Get the seal status of a sector by its ID",
Name: "status",
Usage: "Get the seal status of a sector by its number",
ArgsUsage: "<sectorNum>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "log",
@ -63,7 +65,7 @@ var sectorsStatusCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
if !cctx.Args().Present() {
return fmt.Errorf("must specify sector ID to get status of")
return fmt.Errorf("must specify sector number to get status of")
}
id, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
@ -208,6 +210,39 @@ var sectorsRefsCmd = &cli.Command{
},
}
var sectorsRemoveCmd = &cli.Command{
Name: "remove",
Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)",
ArgsUsage: "<sectorNum>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "pass this flag if you know what you are doing",
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Bool("really-do-it") {
return xerrors.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Args().Len() != 1 {
return xerrors.Errorf("must pass sector number")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorRemove(ctx, abi.SectorNumber(id))
},
}
var sectorsUpdateCmd = &cli.Command{
Name: "update-state",
Usage: "ADVANCED: manually update the state of a sector, this may aid in error recovery",
@ -228,12 +263,12 @@ var sectorsUpdateCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Args().Len() < 2 {
return xerrors.Errorf("must pass sector ID and new state")
return xerrors.Errorf("must pass sector number and new state")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector ID: %w", err)
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1)))

View File

@ -63,10 +63,6 @@
"title": "Proof-of-Spacetime(s)",
"value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data."
},
"lotus-testnet": {
"title": "Filecoin Testnet",
"value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value the official filecoin tokens will not be released until Mainnet launch."
},
"filecoin-testnet": {
"title": "Filecoin Testnet",
"value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value the official filecoin tokens will not be released until Mainnet launch."

View File

@ -1,11 +1,11 @@
# Lotus
Lotus is an implementation of the [Filecoin Distributed Storage Network](https://filecoin.io/).
A Lotus node syncs blockchains that follow the
A Lotus node syncs blockchains that follow the
Filecoin protocol, validating the blocks and state transitions.
The specification for the Filecoin protocol can be found [here](https://filecoin-project.github.io/specs/).
For information on how to setup and operate a Lotus node,
For information on how to setup and operate a Lotus node,
please follow the instructions [here](https://lotu.sh/en+getting-started).
# Components
@ -24,7 +24,7 @@ FIXME: No mention of block production here, cross-reference with schomatis's min
- Other PL dependencies (IPFS, libp2p, IPLD? FIXME, missing)
- External libraries used by Lotus and other deps (FIXME, missing)
# Preliminaries
# Preliminaries
We discuss some key Filecoin concepts here, aiming to explain them by contrasting them with analogous concepts
in other well-known blockchains like Ethereum. We only provide brief descriptions here; elaboration
@ -34,10 +34,10 @@ can be found in the [spec](https://filecoin-project.github.io/specs/).
Unlike in Ethereum, a block can have multiple parents in Filecoin. We thus refer to the parent set of a block,
instead of a single parent.
A [tipset](https://filecoin-project.github.io/specs/#systems__filecoin_blockchain__struct__tipset)
is any set of blocks that share the same parent set.
A [tipset](https://filecoin-project.github.io/specs/#systems__filecoin_blockchain__struct__tipset)
is any set of blocks that share the same parent set.
There is no concept of "block difficulty" in Filecoin. Instead,
There is no concept of "block difficulty" in Filecoin. Instead,
the weight of a tipset is simply the number of blocks in the chain that ends in that tipset. Note that a longer chain
can have less weight than a shorter chain with more blocks per tipset (for example, a chain of three single-block tipsets has weight 3, while a shorter chain of two two-block tipsets has weight 4).
@ -49,8 +49,8 @@ We call the heaviest tipset in a chain the "head" of the chain.
### Actors and Messages
An [Actor](https://filecoin-project.github.io/specs/#systems__filecoin_vm__actor)
is analogous to a smart contract in Ethereum. Filecoin does not allow users to define their own
actors, but comes with several [builtin actors](https://github.com/filecoin-project/specs-actors),
is analogous to a smart contract in Ethereum. Filecoin does not allow users to define their own
actors, but comes with several [builtin actors](https://github.com/filecoin-project/specs-actors),
which can be thought of as pre-compiled contracts.
A [Message](https://filecoin-project.github.io/specs/#systems__filecoin_vm__message)
@ -70,8 +70,8 @@ We now discuss the various stages of the sync process.
## Sync setup
When a Lotus node connects to a new peer, we exchange the head of our chain
with the new peer through [the `hello` protocol](https://github.com/filecoin-project/lotus/blob/master/node/hello/hello.go).
When a Lotus node connects to a new peer, we exchange the head of our chain
with the new peer through [the `hello` protocol](https://github.com/filecoin-project/lotus/blob/master/node/hello/hello.go).
If the peer's head is heavier than ours, we try to sync to it. Note
that we do NOT update our chain head at this stage.
@ -79,7 +79,7 @@ that we do NOT update our chain head at this stage.
Note: The API refers to these stages as `StageHeaders` and `StagePersistHeaders`.
We proceed in the sync process by requesting block headers from the peer,
We proceed in the sync process by requesting block headers from the peer,
moving back from their head, until we reach a tipset that we have in common
(such a common tipset must exist, though it may simply be the genesis block).
The functionality can be found in `Syncer::collectHeaders()`.
@ -90,7 +90,7 @@ drop part of our chain to connect to the peer's head (referred to as "forking").
FIXME: This next para might be best replaced with a link to the validation doc
Some of the possible causes of failure in this stage include:
- The chain is linked to a block that we have previously marked as bad,
- The chain is linked to a block that we have previously marked as bad,
and stored in a [`BadBlockCache`](https://github.com/filecoin-project/lotus/blob/master/chain/badtscache.go).
- The beacon entries in a block are inconsistent (FIXME: more details about what is validated here wouldn't be bad).
- Switching to this new chain would involve a chain reorganization beyond the allowed threshold (SPECK-CHECK).
@ -101,7 +101,7 @@ Note: The API refers to this stage as `StageMessages`.
Having acquired the headers and found a common tipset, we then move forward, requesting the full blocks, including the messages.
For each block, we first confirm the syntactic validity of the block (SPECK-CHECK),
For each block, we first confirm the syntactic validity of the block (SPECK-CHECK),
which includes the syntactic validity of messages included
in the block.
We then apply the messages, running all the state transitions, and compare the state root we calculate with the provided state root.
@ -121,11 +121,11 @@ syntactic validation of messages.
Note: The API refers to this stage as `StageSyncComplete`.
If all validations pass we will now set that head as our heaviest tipset in
[`ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go).
We already have the full state, since we calculated
it during the sync process.
FIXME (aayush) I don't fully understand the next 2 paragraphs, but they seem important. Confirm and polish.
Relevant issue in IPFS: https://github.com/ipfs/ipfs-docs/issues/264
@ -135,7 +135,7 @@ FIXME: Create a further reading appendix, move this next para to it, along with
extraneous content
This is one of the few items we store in the `Datastore` by a fixed key (location), allowing its contents to change on every sync. This is reflected in the `(*ChainStore) writeHead()` function (called by `takeHeaviestTipSet()` above), where we reference the pointer by the explicit `chainHeadKey` address (the string `"head"`, not a hash embedded in a CID), and similarly in `(*ChainStore).Load()` when we start the node and create the `ChainStore`. Compare this to a Filecoin block or message, which is immutable: stored in the `Blockstore` by CID, it never changes once created.
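A minimal sketch of that "mutable pointer at a fixed key" idea, using the `go-datastore` API as pinned in this repository's go.mod (v0.4.x; newer releases add a `context.Context` parameter), could look like this:

```go
package sketch

import "github.com/ipfs/go-datastore"

// chainHeadKey mirrors the idea above: the head pointer lives at a fixed,
// human-readable key, so its value can be overwritten on every sync.
var chainHeadKey = datastore.NewKey("head")

// writeHeadSketch stores the serialized identity of the new heaviest tipset
// under that fixed key. In Lotus the value would be the CBOR-encoded tipset
// key; here it is just opaque bytes.
func writeHeadSketch(ds datastore.Datastore, headBytes []byte) error {
	return ds.Put(chainHeadKey, headBytes)
}
```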
## Keeping up with the chain
A Lotus node also listens for new blocks broadcast by its peers over the `gossipsub` channel (see FIXME for more).
If we have validated such a block's parent tipset, and adding it to our tipset at its height would lead to a heavier
@ -144,11 +144,11 @@ process (indeed, it's the same codepath).
# State
In Filecoin, the chain state at any given point is a collection of data stored under a root CID
encapsulated in the [`StateTree`](https://github.com/filecoin-project/lotus/blob/master/chain/state/statetree.go),
and accessed through the
[`StateManager`](https://github.com/filecoin-project/lotus/blob/master/chain/stmgr/stmgr.go).
The state at the chain's head is thus easily tracked and updated in a state root CID.
(FIXME: Talk about CIDs somewhere, we might want to explain some of the modify/flush/update-root mechanism here.)
## Calculating a Tipset State
@ -156,7 +156,7 @@ The state at the chain's head is thus easily tracked and updated in a state root
Recall that a tipset is a set of blocks that have identical parents (that is, that are built on top of the same tipset).
The genesis tipset comprises the genesis block(s), and has some state corresponding to it.
The methods `TipSetState()` and `computeTipSetState()` in
[`StateManager`](https://github.com/filecoin-project/lotus/blob/master/chain/stmgr/stmgr.go)
are responsible for computing
the state that results from applying a tipset. This involves applying all the messages included
@ -168,25 +168,25 @@ State Root (which is to be expected, since they have the same parent tipset)
### Preparing to apply a tipset
When `StateManager::computeTipsetState()` is called with a tipset, `ts`,
it retrieves the parent state root of the blocks in `ts`. It also creates a list of `BlockMessages`, which wraps the BLS
and SecP messages in a block along with the miner that produced the block.
Control then flows to `StateManager::ApplyBlocks()`, which builds a VM to apply the messages given to it. The VM
is initialized with the parent state root of the blocks in `ts`. We apply the blocks in `ts` in order (see FIXME for
ordering of blocks in a tipset).
### Applying a block
For each block, we prepare to apply the ordered messages (first BLS, then SecP). Before applying a message, we check if
we have already applied a message with that CID within the scope of this method. If so, we simply skip that message;
this is how duplicate messages included in the same tipset are skipped (with only the miner of the "first" block to
include the message getting the reward). For the actual process of message application, see FIXME (need an
internal link here), for now we
simply assume that the outcome of the VM applying a message is either an error, or a
[`MessageReceipt`](https://github.com/filecoin-project/lotus/blob/master/chain/types/message_receipt.go)
and some
other information.
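The duplicate-skipping just described can be pictured with this sketch (stand-in types; not the actual `StateManager` code):

```go
package sketch

// Message is a minimal stand-in keyed by its CID string.
type Message struct {
	CID string
}

// applyBlockMessagesSketch: within one tipset a message CID is applied (and
// rewarded) only the first time it is seen; later copies in sibling blocks are
// skipped.
func applyBlockMessagesSketch(blocks [][]Message, apply func(Message)) {
	seen := make(map[string]struct{})
	for _, blockMsgs := range blocks { // blocks in tipset order
		for _, msg := range blockMsgs { // per block: BLS messages first, then SecP
			if _, dup := seen[msg.CID]; dup {
				continue // duplicate within this tipset: skip it entirely
			}
			seen[msg.CID] = struct{}{}
			apply(msg) // only the first including block's miner earns the reward
		}
	}
}
```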
We treat an error from the VM as a showstopper; there is no recovery, and no meaningful state can be computed for `ts`.
Given a successful receipt, we add the rewards and penalties to what the miner has earned so far. Once all the messages
@ -205,8 +205,8 @@ is the computed state of the tipset.
# Virtual Machine
The Virtual Machine (VM) is responsible for executing messages.
The [Lotus Virtual Machine](https://github.com/filecoin-project/lotus/blob/master/chain/vm/vm.go)
invokes the appropriate methods in the builtin actors, and provides
a [`Runtime`](https://github.com/filecoin-project/specs-actors/blob/master/actors/runtime/runtime.go)
interface to the [builtin actors](https://github.com/filecoin-project/specs-actors)
@ -233,10 +233,10 @@ It then transfers the message's value to the recipient, creating a new account a
### Method Invocation
We use reflection to translate a Filecoin message for the VM to an actual Go function, relying on the VM's
[`invoker`](https://github.com/filecoin-project/lotus/blob/master/chain/vm/invoker.go) structure.
Each actor has its own set of codes defined in `specs-actors/actors/builtin/methods.go`.
The `invoker` structure maps the builtin actors' CIDs
to a list of `invokeFunc` (one per exported method), which each take the `Runtime` (for state manipulation)
and the serialized input parameters.
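A highly simplified picture of that dispatch table (written out by hand instead of built with reflection, and with stand-in types) is:

```go
package sketch

import "errors"

// Runtime is a stand-in for the actors' Runtime interface.
type Runtime struct{}

// invokeFunc mirrors the shape described above: state access via the Runtime
// plus the serialized input parameters.
type invokeFunc func(rt *Runtime, params []byte) ([]byte, error)

type invoker struct {
	builtins map[string][]invokeFunc // actor code CID -> one entry per method number
}

var errUnknownMethod = errors.New("unknown actor code or method number")

func (inv *invoker) invoke(codeCID string, method int, rt *Runtime, params []byte) ([]byte, error) {
	fns, ok := inv.builtins[codeCID]
	if !ok || method < 0 || method >= len(fns) {
		return nil, errUnknownMethod
	}
	return fns[method](rt, params)
}
```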
FIXME (aayush) Polish this next para.
@ -245,39 +245,39 @@ The basic layout (without reflection details) of `(*invoker).transform()` is as
### Returning from the VM
Once method invocation is complete (including any subcalls), we return to `ApplyMessage()`, which receives
the serialized response and the [`ActorError`](https://github.com/filecoin-project/lotus/blob/master/chain/actors/aerrors/error.go).
The sender will be charged the appropriate amount of gas for the returned response, which gets put into the
[`MessageReceipt`](https://github.com/filecoin-project/lotus/blob/master/chain/types/message_receipt.go).
The method then refunds any unused gas to the sender, sets up the gas reward for the miner, and
wraps all of this into an `ApplyRet`, which is returned.
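In its simplest form, the gas bookkeeping described above amounts to the following arithmetic (a sketch only; real Lotus gas handling has more moving parts, such as penalties):

```go
package sketch

// gasOutcome captures the two amounts we care about here.
type gasOutcome struct {
	MinerReward  int64 // what the including miner earns for this message
	SenderRefund int64 // unused gas returned to the sender
}

// settleGasSketch: the sender pre-paid gasLimit*gasPrice when the message was
// accepted; it is ultimately charged only gasUsed*gasPrice, the difference is
// refunded, and the charged amount becomes the miner's gas reward.
func settleGasSketch(gasLimit, gasUsed, gasPrice int64) gasOutcome {
	charged := gasUsed * gasPrice
	prepaid := gasLimit * gasPrice
	return gasOutcome{
		MinerReward:  charged,
		SenderRefund: prepaid - charged,
	}
}
```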
# Building a Lotus node
When we launch a Lotus node with the command `./lotus daemon`
(see [here](https://github.com/filecoin-project/lotus/blob/master/cmd/lotus/daemon.go) for more),
the node is created through [dependency injection](https://godoc.org/go.uber.org/fx).
This relies on reflection, which makes some of the references hard to follow.
The node sets up all of the subsystems it needs to run, such as the repository, the network connections, the chain sync
service, etc.
This setup is orchestrated through calls to the `node.Override` function.
The structure of each call indicates the type of component it will set up
(many defined in [`node/modules/dtypes/`](https://github.com/filecoin-project/lotus/tree/master/node/modules/dtypes)),
and the function that will provide it.
The dependency is implicit in the argument of the provider function.
As an example, consider the `modules.ChainStore()` function that provides the
[`ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go) structure.
It takes as one of its parameters the [`ChainBlockstore`](https://github.com/filecoin-project/lotus/blob/master/node/modules/dtypes/storage.go)
type, which becomes one of its dependencies.
For the node to be built successfully the `ChainBlockstore` will need to be provided before `ChainStore`, a requirement
that is made explicit in another `Override()` call that sets the provider of that type as the `ChainBlockstore()` function.
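The underlying mechanism is easier to see in a bare `fx` example (a sketch with stand-in types, not the actual `node.Override` machinery): the dependency is expressed purely by the constructor's parameter list.

```go
package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Stand-ins for the real Lotus types, just to show the shape of the wiring.
type ChainBlockstore struct{}
type ChainStore struct{ bs *ChainBlockstore }

func NewChainBlockstore() *ChainBlockstore { return &ChainBlockstore{} }

// NewChainStore's parameter is what makes ChainBlockstore an implicit dependency.
func NewChainStore(bs *ChainBlockstore) *ChainStore { return &ChainStore{bs: bs} }

func main() {
	app := fx.New(
		fx.Provide(NewChainBlockstore, NewChainStore),
		fx.Invoke(func(cs *ChainStore) { fmt.Println("chain store ready:", cs != nil) }),
	)
	if err := app.Err(); err != nil {
		panic(err) // a missing provider would surface here
	}
}
```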
## The Repository
The repo is the directory where all of a node's information is stored. The node is entirely defined by its repo, which
makes it easy to port to another location. This one-to-one relationship means we can speak
of the node as the repo it is associated with, instead of the daemon process that runs from that repo.
Only one daemon can be running with an associated repo at a time.
@ -292,17 +292,17 @@ lsof ~/.lotus/repo.lock
Trying to launch a second daemon hooked to the same repo leads to a `repo is already locked (lotus daemon already running)`
error.
The `node.Repo()` function (`node/builder.go`) contains most of the dependencies (specified as `Override()` calls)
needed to properly set up the node's repo. We list the most salient ones here.
### Datastore
`Datastore` and `ChainBlockstore`: Data related to the node state is saved in the repo's `Datastore`,
an IPFS interface defined [here](github.com/ipfs/go-datastore/datastore.go).
an IPFS interface defined [here](https://github.com/ipfs/go-datastore/blob/master/datastore.go).
Lotus creates this interface from a [Badger DB](https://github.com/dgraph-io/badger) in
[`FsRepo`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go).
Every piece of data is fundamentally a key-value pair in the `datastore` directory of the repo.
There are several abstractions laid on top of it that appear through the code depending on *how* we access it,
but it is important to remember that we're always accessing it from the same place.
FIXME: Maybe mention the `Batching` interface as the developer will stumble upon it before reaching the `Datastore` one.
@ -314,8 +314,8 @@ FIXME: IPFS blocks vs Filecoin blocks ideally happens before this / here
The [`Blockstore` interface](`github.com/ipfs/go-ipfs-blockstore/blockstore.go`) structures the key-value pair
into the CID format for the key and the [`Block` interface](`github.com/ipfs/go-block-format/blocks.go`) for the value.
The `Block` value is just a raw string of bytes addressed by its hash, which is included in the CID key.
`ChainBlockstore` creates a `Blockstore` in the repo under the `/blocks` namespace.
Every key stored there will have the `blocks` prefix so that it does not collide with other stores that use the same repo.
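The namespacing can be sketched with the `go-datastore` API pinned in this repository's go.mod (v0.4.x; newer releases add a `context.Context` parameter); the key and value below are made up for illustration:

```go
package sketch

import (
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
)

// blocksNamespaceSketch: wrapping the repo datastore so every key written
// through the wrapper lands under the /blocks prefix, keeping it from
// colliding with other users of the same repo.
func blocksNamespaceSketch() error {
	base := datastore.NewMapDatastore()                         // in-memory stand-in for the repo's Badger datastore
	blocks := namespace.Wrap(base, datastore.NewKey("/blocks")) // conceptually what ChainBlockstore does

	if err := blocks.Put(datastore.NewKey("/somecid"), []byte("raw block bytes")); err != nil {
		return err
	}
	// The same entry is visible in the underlying store under the prefixed key.
	_, err := base.Get(datastore.NewKey("/blocks/somecid"))
	return err
}
```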
FIXME: Link to IPFS documentation about DAG, CID, and related, especially we need a diagram that shows how do we wrap each datastore inside the next layer (datastore, batching, block store, gc, etc).
@ -323,7 +323,7 @@ FIXME: Link to IPFS documentation about DAG, CID, and related, especially we nee
#### Metadata
`modules.Datastore()` creates a `dtypes.MetadataDS`, which is an alias for the basic `Datastore` interface.
Metadata is stored here under the `/metadata` prefix.
(FIXME: Explain *what* is metadata in contrast with the block store, namely we store the pointer to the heaviest chain, we might just link to that unwritten section here later.)
FIXME: Explain the key store related calls (maybe remove, per Schomatis)
@ -331,10 +331,10 @@ FIXME: Explain the key store related calls (maybe remove, per Schomatis)
### LockedRepo
`LockedRepo()`: This method doesn't create or initialize any new structures, but rather registers an
`OnStop` [hook](https://godoc.org/go.uber.org/fx/internal/lifecycle#Hook)
that will close the locked repository associated with it on shutdown.
### Repo types / Node types
FIXME: This section needs to be clarified / corrected...I don't fully understand the config differences (what do they have in common, if anything?)
@ -351,19 +351,19 @@ As we said, the repo fully identifies the node so a repo type is also a *node* t
FIXME: Much of this might need to be subsumed into the p2p section
The `node.Online()` configuration function (`node/builder.go`) initializes components that involve connecting to,
or interacting with, the Filecoin network. These connections are managed through the libp2p stack (FIXME link to this section when it exists).
We discuss some of the components found in the full node type (that is, included in the `ApplyIf(isType(repo.FullNode),` call).
#### Chainstore
`modules.ChainStore()` creates the [`store.ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go)
that wraps the stores
previously instantiated in `Repo()`. It is the main point of entry for the node to all chain-related data
(FIXME: this is incorrect, we sometimes access its underlying block store directly, and probably shouldn't).
It also holds the crucial `heaviest` pointer, which indicates the current head of the chain.
#### ChainExchange and ChainBlockservice
`ChainExchange()` and `ChainBlockservice()` establish a BitSwap connection (FIXME libp2p link)
to exchange chain information in the form of `blocks.Block`s stored in the repo. (See sync section for more details, the Filecoin blocks and messages are backed by these raw IPFS blocks that together form the different structures that define the state of the current/heaviest chain.)
#### Incoming handlers
@ -371,7 +371,7 @@ to exchange chain information in the form of `blocks.Block`s stored in the repo.
and messages from the network (see `<undefined>` for more information about the topics the node is subscribed to, FIXME: should that be part of the libp2p section or should we expand on gossipsub separately?).
#### Hello
`RunHello()`: starts the services to both send (`(*Service).SayHello()`) and receive (`(*Service).HandleStream()`, `node/hello/hello.go`)
`hello` messages. When nodes establish a new connection with each other, they exchange these messages
to share chain-related information (namely their genesis block and their heaviest tipset).
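The payload is roughly shaped like the struct below; this is an approximation for orientation only, and the field names are illustrative (see `node/hello/hello.go` for the real wire format):

```go
package sketch

import "github.com/ipfs/go-cid"

// helloMessage: enough information for a peer to recognise our genesis and to
// judge whether our head is heavier than theirs.
type helloMessage struct {
	HeaviestTipSet       []cid.Cid // CIDs of the blocks in our current head
	HeaviestTipSetWeight uint64    // the real field uses a big integer
	GenesisHash          cid.Cid   // peers on a different genesis are ignored
}
```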
@ -381,14 +381,14 @@ to share chain-related information (namely their genesis block and their heavies
### Ordering the dependencies
We can establish the dependency relations by looking at the parameters that each function needs and by understanding
the architecture of the node and how the different components relate to each other (the chief purpose of this document).
As an example, the sync mechanism depends on the node being able to exchange different IPFS blocks with the network,
so as to be able to request the "missing pieces" needed to construct the chain. This dependency is reflected by `NewSyncer()`
having a `blocksync.BlockSync` parameter, which in turn depends on `ChainBlockservice()` and `ChainExchange()`.
The chain exchange service further depends on the chain store to save and retrieve chain data, which is reflected
in `ChainExchange()` having `ChainGCBlockstore` as a parameter (which is just a wrapper around `ChainBlockstore` capable
of garbage collection).
This block store is the same store underlying the chain store, which is an indirect dependency of `NewSyncer()` (through the `StateManager`).
(FIXME: This last line is flaky, we need to resolve the hierarchy better, we sometimes refer to the chain store and sometimes to its underlying block store. We need a diagram to visualize all the different components just mentioned otherwise it is too hard to follow. We probably even need to skip some of the connections mentioned.)

View File

@ -51,4 +51,4 @@ To get the number of cores for your GPU, you will need to check your cards sp
## Benchmarking
Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/testnet-staging/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**.
Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**.

24
go.mod
View File

@ -29,10 +29,10 @@ require (
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7
github.com/filecoin-project/specs-actors v0.6.1
github.com/filecoin-project/specs-storage v0.1.0
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743
github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246
github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
github.com/go-kit/kit v0.10.0
github.com/go-ole/go-ole v1.2.4 // indirect
@ -44,8 +44,8 @@ require (
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/ipfs/go-bitswap v0.2.8
github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-blockservice v0.1.3
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834
github.com/ipfs/go-cid v0.0.6
github.com/ipfs/go-cidutil v0.0.2
github.com/ipfs/go-datastore v0.4.4
github.com/ipfs/go-ds-badger2 v0.1.0
@ -76,20 +76,20 @@ require (
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.2.0
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.9.4
github.com/libp2p/go-libp2p v0.10.0
github.com/libp2p/go-libp2p-connmgr v0.2.4
github.com/libp2p/go-libp2p-core v0.5.7
github.com/libp2p/go-libp2p-core v0.6.0
github.com/libp2p/go-libp2p-discovery v0.4.0
github.com/libp2p/go-libp2p-kad-dht v0.8.1
github.com/libp2p/go-libp2p-mplex v0.2.3
github.com/libp2p/go-libp2p-peer v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.2.4
github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p-pubsub v0.3.2
github.com/libp2p/go-libp2p-quic-transport v0.5.0
github.com/libp2p/go-libp2p-record v0.1.2
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-secio v0.2.2
github.com/libp2p/go-libp2p-swarm v0.2.6
github.com/libp2p/go-libp2p-swarm v0.2.7
github.com/libp2p/go-libp2p-tls v0.1.3
github.com/libp2p/go-libp2p-yamux v0.2.8
github.com/libp2p/go-maddr-filter v0.1.0
@ -100,11 +100,11 @@ require (
github.com/multiformats/go-multiaddr v0.2.2
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.5
github.com/multiformats/go-multibase v0.0.2
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.13
github.com/opentracing/opentracing-go v1.1.0
github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.5.1
github.com/stretchr/testify v1.6.1
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.2.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba

48
go.sum
View File

@ -62,7 +62,6 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -253,18 +252,19 @@ github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZO
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM=
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7 h1:cjsOpQKvZosPx9/qqq2bucHVdRyXzvBR1f37atiR3/0=
github.com/filecoin-project/sector-storage v0.0.0-20200615192001-42c9e08595b7/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM=
github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246 h1:NfYQRmVRe0LzlNbK5Ket3vbBOwFD5TvtcNtfo/Sd8mg=
github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246/go.mod h1:8f0hWDzzIi1hKs4IVKH9RnDsO4LEHVz8BNat0okDOuY=
github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0=
github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA=
github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121 h1:oRA+b4iN4H86xXDXbU3TOyvmBZp7//c5VqTc0oJ6nLg=
github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94=
github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743 h1:a8f1p6UdeD+ZINBKJN4FhEos8uaKeASOAabq5RCpQdg=
github.com/filecoin-project/storage-fsm v0.0.0-20200615162749-494c3bc48743/go.mod h1:q1YCutTSMq/yGYvDPHReT37bPfDLHltnwJutzR9kOY0=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044 h1:i4oMhv1kx/MAUxRN4EM5tag5fI1uagrwQwINgKrzUt4=
github.com/filecoin-project/storage-fsm v0.0.0-20200625160832-379a4655b044/go.mod h1:JD7fmV1BYADDcy4EYQnqFH/rUzXsh0Je0jXarCjZqSk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
@ -465,6 +465,8 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM=
github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
@ -473,6 +475,8 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs=
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
@ -558,6 +562,8 @@ github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRD
github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8=
github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
@ -724,8 +730,8 @@ github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniV
github.com/libp2p/go-libp2p v0.8.2/go.mod h1:NQDA/F/qArMHGe0J7sDScaKjW8Jh4y/ozQqBbYJ+BnA=
github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM=
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
github.com/libp2p/go-libp2p v0.9.4 h1:yighwjFvsF/qQaGtHPZfxcF+ph4ydCNnsKvg712lYRo=
github.com/libp2p/go-libp2p v0.9.4/go.mod h1:NzQcC2o19xgwGqCmjx7DN+4h2F13qPCZ9UJmweYzsnU=
github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0=
github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
@ -751,6 +757,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3
github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA=
github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM=
github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4=
github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w=
@ -777,6 +785,8 @@ github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqe
github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.5.7 h1:QK3xRwFxqd0Xd9bSZL+8yZ8ncZZbl6Zngd/+Y+A6sgQ=
github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak=
github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
@ -836,17 +846,17 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj
github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw=
github.com/libp2p/go-libp2p-peerstore v0.2.4 h1:jU9S4jYN30kdzTpDAR7SlHUD+meDUjTODh4waLWF1ws=
github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U=
github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s=
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.1/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.3.2 h1:k3cJm5JW5mjaWZkobS50sJLJWaB2mBi0HW4eRlE8mSo=
github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.3.7/go.mod h1:Kr4aDtnfHHNeENn5J+sZIVc+t8HpQn9W6BOxhVGHbgI=
github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
@ -872,8 +882,8 @@ github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+
github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU=
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y=
github.com/libp2p/go-libp2p-swarm v0.2.6 h1:UhMXIa+yCOALQyceENEIStMlbTCzOM6aWo6vw8QW17Q=
github.com/libp2p/go-libp2p-swarm v0.2.6/go.mod h1:F9hrkZjO7dDbcEiYii/fAB1QdpLuU6h1pa4P5VNsEgc=
github.com/libp2p/go-libp2p-swarm v0.2.7 h1:4lV/sf7f0NuVqunOpt1I11+Z54+xp+m0eeAvxj/LyRc=
github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@ -972,7 +982,6 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/lucas-clemente/quic-go v0.15.7/go.mod h1:Myi1OyS0FOjL3not4BxT7KN29bRkcMUV5JVVFLKtDp8=
github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI=
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1043,6 +1052,8 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
@ -1072,6 +1083,8 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA=
github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM=
@ -1291,6 +1304,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@ -1778,6 +1794,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=

View File

@ -215,6 +215,9 @@ type MiningBase struct {
}
func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) {
m.lk.Lock()
defer m.lk.Unlock()
bts, err := m.api.ChainHead(ctx)
if err != nil {
return nil, err
@ -252,6 +255,12 @@ func (m *Miner) hasPower(ctx context.Context, addr address.Address, ts *types.Ti
return mpower.MinerPower.QualityAdjPower.GreaterThanEqual(power.ConsensusMinerMinPower), nil
}
// mineOne mines a single block, and does so synchronously, if and only if we
// have won the current round.
//
// {hint/landmark}: This method coordinates all the steps involved in mining a
// block, including deciding whether to mine at all, depending on whether we
// win the current round.
func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, error) {
log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()))
start := time.Now()

View File

@ -218,6 +218,7 @@ func Online() Option {
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
Override(new(dtypes.DrandConfig), modules.BuiltinDrandConfig),
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
@ -312,8 +313,12 @@ func Online() Option {
Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver),
Override(new(*miner.Miner), modules.SetupBlockProducer),
Override(new(dtypes.AcceptingRetrievalDealsConfigFunc), modules.NewAcceptingRetrievalDealsConfigFunc),
Override(new(dtypes.SetAcceptingRetrievalDealsConfigFunc), modules.NewSetAcceptingRetrievalDealsConfigFunc),
Override(new(dtypes.AcceptingStorageDealsConfigFunc), modules.NewAcceptingStorageDealsConfigFunc),
Override(new(dtypes.SetAcceptingStorageDealsConfigFunc), modules.NewSetAcceptingStorageDealsConfigFunc),
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
),
)
}

View File

@ -4,6 +4,8 @@ import (
"encoding"
"time"
"github.com/ipfs/go-cid"
sectorstorage "github.com/filecoin-project/sector-storage"
)
@ -32,7 +34,9 @@ type StorageMiner struct {
}
type DealmakingConfig struct {
AcceptingStorageDeals bool
AcceptingRetrievalDeals bool
PieceCidBlocklist []cid.Cid
}
// API contains configs for API endpoint
@ -120,7 +124,9 @@ func DefaultStorageMiner() *StorageMiner {
},
Dealmaking: DealmakingConfig{
AcceptingStorageDeals: true,
AcceptingRetrievalDeals: true,
PieceCidBlocklist: []cid.Cid{},
},
}
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"

View File

@ -3,6 +3,7 @@ package client
import (
"context"
"errors"
"fmt"
"github.com/filecoin-project/go-fil-markets/pieceio"
basicnode "github.com/ipld/go-ipld-prime/node/basic"
@ -30,7 +31,7 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi"
@ -58,8 +59,8 @@ type API struct {
paych.PaychAPI
SMDealClient storagemarket.StorageClient
RetDiscovery retrievalmarket.PeerResolver
Retrieval retrievalmarket.RetrievalClient
RetDiscovery rm.PeerResolver
Retrieval rm.RetrievalClient
Chain *store.ChainStore
LocalDAG dtypes.ClientDAG
@ -201,25 +202,51 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffe
out := make([]api.QueryOffer, len(peers))
for k, p := range peers {
queryResponse, err := a.Retrieval.Query(ctx, p, root, retrievalmarket.QueryParams{})
if err != nil {
out[k] = api.QueryOffer{Err: err.Error(), Miner: p.Address, MinerPeerID: p.ID}
} else {
out[k] = api.QueryOffer{
Root: root,
Size: queryResponse.Size,
MinPrice: queryResponse.PieceRetrievalPrice(),
PaymentInterval: queryResponse.MaxPaymentInterval,
PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
Miner: queryResponse.PaymentAddress, // TODO: check
MinerPeerID: p.ID,
}
}
out[k] = a.makeRetrievalQuery(ctx, p, root, rm.QueryParams{})
}
return out, nil
}
func (a *API) ClientMinerQueryOffer(ctx context.Context, payload cid.Cid, miner address.Address) (api.QueryOffer, error) {
mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
if err != nil {
return api.QueryOffer{}, err
}
rp := rm.RetrievalPeer{
Address: miner,
ID: mi.PeerId,
}
return a.makeRetrievalQuery(ctx, rp, payload, rm.QueryParams{}), nil
}
func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, payload cid.Cid, qp rm.QueryParams) api.QueryOffer {
queryResponse, err := a.Retrieval.Query(ctx, rp, payload, qp)
if err != nil {
return api.QueryOffer{Err: err.Error(), Miner: rp.Address, MinerPeerID: rp.ID}
}
var errStr string
switch queryResponse.Status {
case rm.QueryResponseAvailable:
errStr = ""
case rm.QueryResponseUnavailable:
errStr = fmt.Sprintf("retrieval query offer was unavailable: %s", queryResponse.Message)
case rm.QueryResponseError:
errStr = fmt.Sprintf("retrieval query offer errored: %s", queryResponse.Message)
}
return api.QueryOffer{
Root: payload,
Size: queryResponse.Size,
MinPrice: queryResponse.PieceRetrievalPrice(),
PaymentInterval: queryResponse.MaxPaymentInterval,
PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
Miner: queryResponse.PaymentAddress, // TODO: check
MinerPeerID: rp.ID,
Err: errStr,
}
}
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (cid.Cid, error) {
bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)
@ -318,13 +345,35 @@ func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
retrievalResult := make(chan error, 1)
unsubscribe := a.Retrieval.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) {
unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) {
if state.PayloadCID.Equals(order.Root) {
switch state.Status {
case retrievalmarket.DealStatusFailed, retrievalmarket.DealStatusErrored:
retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
case retrievalmarket.DealStatusCompleted:
case rm.DealStatusCompleted:
retrievalResult <- nil
case rm.DealStatusRejected:
retrievalResult <- xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message)
case
rm.DealStatusDealNotFound,
rm.DealStatusErrored,
rm.DealStatusFailed:
retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
case
rm.DealStatusAccepted,
rm.DealStatusAwaitingAcceptance,
rm.DealStatusBlocksComplete,
rm.DealStatusFinalizing,
rm.DealStatusFundsNeeded,
rm.DealStatusFundsNeededLastPayment,
rm.DealStatusNew,
rm.DealStatusOngoing,
rm.DealStatusPaymentChannelAddingFunds,
rm.DealStatusPaymentChannelAllocatingLane,
rm.DealStatusPaymentChannelCreating,
rm.DealStatusPaymentChannelReady,
rm.DealStatusVerified:
return
default:
retrievalResult <- xerrors.Errorf("Unhandled Retrieval Status: %+v", state.Status)
}
}
})
@ -334,7 +383,7 @@ func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
_, err := a.Retrieval.Retrieve(
ctx,
order.Root,
retrievalmarket.NewParamsV0(ppb, order.PaymentInterval, order.PaymentIntervalIncrease),
rm.NewParamsV0(ppb, order.PaymentInterval, order.PaymentIntervalIncrease),
order.Total,
order.MinerPeerID,
order.Client,

View File

@ -139,4 +139,8 @@ func (a *CommonAPI) Shutdown(ctx context.Context) error {
return nil
}
func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil // relies on jsonrpc closing
}
var _ api.Common = &CommonAPI{}

View File

@ -425,7 +425,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet
if err != nil {
return nil, err
}
locked, err := hamt.LoadNode(ctx, cst, state.EscrowTable, hamt.UseTreeBitWidth(5))
locked, err := hamt.LoadNode(ctx, cst, state.LockedTable, hamt.UseTreeBitWidth(5))
if err != nil {
return nil, err
}
@ -489,13 +489,11 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m
var s market.DealState
if err := sa.Get(ctx, i, &s); err != nil {
if err != nil {
if _, ok := err.(*amt.ErrNotFound); !ok {
return xerrors.Errorf("failed to get state for deal in proposals array: %w", err)
}
s.SectorStartEpoch = -1
if _, ok := err.(*amt.ErrNotFound); !ok {
return xerrors.Errorf("failed to get state for deal in proposals array: %w", err)
}
s.SectorStartEpoch = -1
}
out[strconv.FormatInt(int64(i), 10)] = api.MarketDeal{
Proposal: d,

View File

@ -43,7 +43,10 @@ type StorageMinerAPI struct {
StorageMgr *sectorstorage.Manager `optional:"true"`
*stores.Index
SetAcceptingStorageDealsConfigFunc dtypes.SetAcceptingStorageDealsConfigFunc
SetAcceptingRetrievalDealsConfigFunc dtypes.SetAcceptingRetrievalDealsConfigFunc
StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc
SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc
}
func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
@ -172,6 +175,10 @@ func (sm *StorageMinerAPI) SectorsUpdate(ctx context.Context, id abi.SectorNumbe
return sm.Miner.ForceSectorState(ctx, id, sealing.SectorState(state))
}
func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber) error {
return sm.Miner.RemoveSector(ctx, id)
}
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := connectRemoteWorker(ctx, sm, url)
if err != nil {
@ -201,8 +208,17 @@ func (sm *StorageMinerAPI) MarketListIncompleteDeals(ctx context.Context) ([]sto
return sm.StorageProvider.ListLocalDeals()
}
func (sm *StorageMinerAPI) MarketSetPrice(ctx context.Context, p types.BigInt) error {
return sm.StorageProvider.SetAsk(p, 60*60*24*100) // lasts for 100 days?
func (sm *StorageMinerAPI) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
options := []storagemarket.StorageAskOption{
storagemarket.MinPieceSize(minPieceSize),
storagemarket.MaxPieceSize(maxPieceSize),
}
return sm.StorageProvider.SetAsk(price, duration, options...)
}
func (sm *StorageMinerAPI) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
return sm.StorageProvider.GetAsk(), nil
}
func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) {
@ -213,6 +229,10 @@ func (sm *StorageMinerAPI) DealsSetAcceptingStorageDeals(ctx context.Context, b
return sm.SetAcceptingStorageDealsConfigFunc(b)
}
func (sm *StorageMinerAPI) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
return sm.SetAcceptingRetrievalDealsConfigFunc(b)
}
func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error {
fi, err := os.Open(fname)
if err != nil {
@ -223,6 +243,14 @@ func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fn
return sm.StorageProvider.ImportDataForDeal(ctx, deal, fi)
}
func (sm *StorageMinerAPI) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
return sm.StorageDealPieceCidBlocklistConfigFunc()
}
func (sm *StorageMinerAPI) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
return sm.SetStorageDealPieceCidBlocklistConfigFunc(cids)
}
func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) error {
if sm.StorageMgr == nil {
return xerrors.Errorf("no storage manager")

View File

@ -0,0 +1,6 @@
package dtypes
type DrandConfig struct {
Servers []string
ChainInfoJSON string
}

View File

@ -1,6 +1,8 @@
package dtypes
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
)
@ -8,10 +10,27 @@ import (
type MinerAddress address.Address
type MinerID abi.ActorID
// AcceptingStorageDealsFunc is a function which reads from miner config to
// determine if the user has disabled storage deals (or not).
// AcceptingStorageDealsConfigFunc is a function which reads from miner config
// to determine if the user has disabled storage deals (or not).
type AcceptingStorageDealsConfigFunc func() (bool, error)
// SetAcceptingStorageDealsFunc is a function which is used to disable or enable
// storage deal acceptance.
// SetAcceptingStorageDealsConfigFunc is a function which is used to disable or
// enable storage deal acceptance.
type SetAcceptingStorageDealsConfigFunc func(bool) error
// AcceptingRetrievalDealsConfigFunc is a function which reads from miner config
// to determine if the user has disabled retrieval acceptance (or not).
type AcceptingRetrievalDealsConfigFunc func() (bool, error)
// SetAcceptingRetrievalDealsConfigFunc is a function which is used to disable
// or enable retrieval deal acceptance.
type SetAcceptingRetrievalDealsConfigFunc func(bool) error
// StorageDealPieceCidBlocklistConfigFunc is a function which reads from miner config
// to obtain a list of CIDs for which the storage miner will not accept storage
// proposals.
type StorageDealPieceCidBlocklistConfigFunc func() ([]cid.Cid, error)
// SetStorageDealPieceCidBlocklistConfigFunc is a function which is used to set a
// list of CIDs for which the storage miner will reject deal proposals.
type SetStorageDealPieceCidBlocklistConfigFunc func([]cid.Cid) error

View File

@ -44,13 +44,14 @@ type GossipIn struct {
Db dtypes.DrandBootstrap
Cfg *config.Pubsub
Sk *dtypes.ScoreKeeper
Dr dtypes.DrandConfig
}
func getDrandTopic() (string, error) {
func getDrandTopic(chainInfoJSON string) (string, error) {
var drandInfo = struct {
Hash string `json:"hash"`
}{}
err := json.Unmarshal([]byte(build.DrandChain), &drandInfo)
err := json.Unmarshal([]byte(chainInfoJSON), &drandInfo)
if err != nil {
return "", xerrors.Errorf("could not unmarshal drand chain info: %w", err)
}
@ -68,7 +69,7 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
}
isBootstrapNode := in.Cfg.Bootstrapper
drandTopic, err := getDrandTopic()
drandTopic, err := getDrandTopic(in.Dr.ChainInfoJSON)
if err != nil {
return nil, err
}

View File

@ -108,8 +108,13 @@ func RetrievalResolver(l *discovery.Local) retrievalmarket.PeerResolver {
type RandomBeaconParams struct {
fx.In
PubSub *pubsub.PubSub `optional:"true"`
Cs *store.ChainStore
DrandConfig dtypes.DrandConfig
}
func BuiltinDrandConfig() dtypes.DrandConfig {
return build.DrandConfig
}
func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.RandomBeacon, error) {
@ -119,5 +124,5 @@ func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Random
}
//return beacon.NewMockBeacon(build.BlockDelay * time.Second)
return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelay, p.PubSub)
return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelay, p.PubSub, p.DrandConfig)
}

View File

@ -3,11 +3,13 @@ package modules
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/ipfs/go-bitswap"
"github.com/ipfs/go-bitswap/network"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
graphsync "github.com/ipfs/go-graphsync/impl"
@ -73,6 +75,12 @@ func GetParams(sbc *ffiwrapper.Config) error {
return err
}
// If built-in assets are disabled, we expect the user to have placed the right
// parameters in the right location on the filesystem (/var/tmp/filecoin-proof-parameters).
if build.DisableBuiltinAssets {
return nil
}
if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
@ -307,7 +315,7 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat
return storedAsk, nil
}
func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, isAcceptingFunc dtypes.AcceptingStorageDealsConfigFunc) (storagemarket.StorageProvider, error) {
func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, isAcceptingFunc dtypes.AcceptingStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc) (storagemarket.StorageProvider, error) {
net := smnet.NewFromLibp2pHost(h)
store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path()))
if err != nil {
@ -325,6 +333,18 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
return false, "miner is not accepting storage deals", nil
}
blocklist, err := blocklistFunc()
if err != nil {
return false, "miner error", err
}
for idx := range blocklist {
if deal.Proposal.PieceCID.Equals(blocklist[idx]) {
log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String())
return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil
}
}
return true, "", nil
})
@ -337,14 +357,31 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
}
// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) {
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore, isAcceptingFunc dtypes.AcceptingRetrievalDealsConfigFunc) (retrievalmarket.RetrievalProvider, error) {
adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full)
address, err := minerAddrFromDS(ds)
maddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
network := rmnet.NewFromLibp2pHost(h)
return retrievalimpl.NewProvider(address, adapter, network, pieceStore, ibs, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")))
netwk := rmnet.NewFromLibp2pHost(h)
opt := retrievalimpl.DealDeciderOpt(func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
b, err := isAcceptingFunc()
if err != nil {
return false, "miner error", err
}
if !b {
log.Warn("retrieval deal acceptance disabled; rejecting retrieval deal proposal from client")
return false, "miner is not accepting retrieval deals", nil
}
return true, "", nil
})
return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, ibs, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt)
}
func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) {
@ -379,36 +416,88 @@ func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageA
return sectorstorage.StorageAuth(headers), nil
}
func NewAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingRetrievalDealsConfigFunc, error) {
return func() (out bool, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
out = cfg.Dealmaking.AcceptingRetrievalDeals
})
return
}, nil
}
func NewSetAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingRetrievalDealsConfigFunc, error) {
return func(b bool) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.AcceptingRetrievalDeals = b
})
return
}, nil
}
func NewAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingStorageDealsConfigFunc, error) {
return func() (bool, error) {
raw, err := r.Config()
if err != nil {
return false, err
}
cfg, ok := raw.(*config.StorageMiner)
if !ok {
return false, xerrors.New("expected address of config.StorageMiner")
}
return cfg.Dealmaking.AcceptingStorageDeals, nil
return func() (out bool, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
out = cfg.Dealmaking.AcceptingStorageDeals
})
return
}, nil
}
func NewSetAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingStorageDealsConfigFunc, error) {
return func(b bool) error {
var typeErr error
setConfigErr := r.SetConfig(func(raw interface{}) {
cfg, ok := raw.(*config.StorageMiner)
if !ok {
typeErr = errors.New("expected storage miner config")
return
}
return func(b bool) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.AcceptingStorageDeals = b
})
return multierr.Combine(typeErr, setConfigErr)
return
}, nil
}
func NewStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.StorageDealPieceCidBlocklistConfigFunc, error) {
return func() (out []cid.Cid, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
out = cfg.Dealmaking.PieceCidBlocklist
})
return
}, nil
}
func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.SetStorageDealPieceCidBlocklistConfigFunc, error) {
return func(blocklist []cid.Cid) (err error) {
err = mutateCfg(r, func(cfg *config.StorageMiner) {
cfg.Dealmaking.PieceCidBlocklist = blocklist
})
return
}, nil
}
func readCfg(r repo.LockedRepo, accessor func(*config.StorageMiner)) error {
raw, err := r.Config()
if err != nil {
return err
}
cfg, ok := raw.(*config.StorageMiner)
if !ok {
return xerrors.New("expected address of config.StorageMiner")
}
accessor(cfg)
return nil
}
func mutateCfg(r repo.LockedRepo, mutator func(*config.StorageMiner)) error {
var typeErr error
setConfigErr := r.SetConfig(func(raw interface{}) {
cfg, ok := raw.(*config.StorageMiner)
if !ok {
typeErr = errors.New("expected storage miner config")
return
}
mutator(cfg)
})
return multierr.Combine(typeErr, setConfigErr)
}
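
readCfg and mutateCfg factor out the raw-config type assertion that each getter/setter constructor above previously repeated inline; the setters additionally combine a type error with the SetConfig error. The sketch below shows the same pattern in a self-contained form, assuming go.uber.org/multierr; MinerConfig and memRepo are hypothetical stand-ins for config.StorageMiner and repo.LockedRepo, not lotus code.

package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

// MinerConfig is an illustrative stand-in for config.StorageMiner.
type MinerConfig struct {
	AcceptingStorageDeals bool
}

// memRepo is an illustrative stand-in for repo.LockedRepo: it hands its config
// out as interface{}, which is why the helpers need a type assertion.
type memRepo struct {
	cfg *MinerConfig
}

func (r *memRepo) Config() (interface{}, error) { return r.cfg, nil }

func (r *memRepo) SetConfig(mut func(interface{})) error {
	mut(r.cfg)
	return nil // a real repo would persist the mutated config here
}

// readCfg asserts the raw config and hands it to a read-only accessor.
func readCfg(r *memRepo, accessor func(*MinerConfig)) error {
	raw, err := r.Config()
	if err != nil {
		return err
	}
	cfg, ok := raw.(*MinerConfig)
	if !ok {
		return errors.New("expected *MinerConfig")
	}
	accessor(cfg)
	return nil
}

// mutateCfg asserts the raw config inside SetConfig and combines the type
// error (if any) with the write error.
func mutateCfg(r *memRepo, mutator func(*MinerConfig)) error {
	var typeErr error
	setErr := r.SetConfig(func(raw interface{}) {
		cfg, ok := raw.(*MinerConfig)
		if !ok {
			typeErr = errors.New("expected *MinerConfig")
			return
		}
		mutator(cfg)
	})
	return multierr.Combine(typeErr, setErr)
}

func main() {
	r := &memRepo{cfg: &MinerConfig{}}
	_ = mutateCfg(r, func(c *MinerConfig) { c.AcceptingStorageDeals = true })
	var accepting bool
	_ = readCfg(r, func(c *MinerConfig) { accepting = c.AcceptingStorageDeals })
	fmt.Println("accepting storage deals:", accepting)
}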

View File

@ -0,0 +1,15 @@
[Unit]
Description=Chainwatch
After=lotus-daemon.service
Requires=lotus-daemon.service
[Service]
Environment=GOLOG_FILE="/var/log/lotus/chainwatch.log"
Environment=GOLOG_LOG_FMT="json"
Environment=LOTUS_DB=""
Environment=LOTUS_PATH="%h/.lotus"
EnvironmentFile=-/etc/lotus/chainwatch.env
ExecStart=/usr/local/bin/chainwatch run
[Install]
WantedBy=multi-user.target

View File

@ -1,14 +1,14 @@
[Unit]
Description=Lotus Daemon
After=network-online.target
Wants=network-online.target
Requires=network-online.target
[Service]
Environment=GOLOG_FILE="/var/log/lotus-daemon"
Environment=GOLOG_FILE="/var/log/lotus/daemon.log"
Environment=GOLOG_LOG_FMT="json"
ExecStart=/usr/local/bin/lotus daemon
Restart=always
RestartSec=30
RestartSec=10
MemoryAccounting=true
MemoryHigh=8G

View File

@ -2,10 +2,11 @@
Description=Lotus Storage Miner
After=network.target
After=lotus-daemon.service
Requires=lotus-daemon.service
[Service]
ExecStart=/usr/local/bin/lotus-storage-miner run
Environment=GOLOG_FILE="/var/log/lotus-miner"
Environment=GOLOG_FILE="/var/log/lotus/miner.log"
Environment=GOLOG_LOG_FMT="json"
[Install]

View File

@ -39,3 +39,7 @@ func (m *Miner) PledgeSector() error {
func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error {
return m.sealing.ForceSectorState(ctx, id, state)
}
func (m *Miner) RemoveSector(ctx context.Context, id abi.SectorNumber) error {
return m.sealing.Remove(ctx, id)
}

View File

@ -99,10 +99,6 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check *abi.BitFi
log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors))
if len(sectors) == 0 { // nothing to recover
return nil, nil
}
sbf := bitfield.New()
for s := range sectors {
(&sbf).Set(uint64(s.Number))
@ -387,17 +383,14 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
return nil, xerrors.Errorf("get need prove sectors: %w", err)
}
var skipped *abi.BitField
{
good, err := s.checkSectors(ctx, nps)
if err != nil {
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
}
good, err := s.checkSectors(ctx, nps)
if err != nil {
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
}
skipped, err = bitfield.SubtractBitField(nps, good)
if err != nil {
return nil, xerrors.Errorf("nps - good: %w", err)
}
skipped, err := bitfield.SubtractBitField(nps, good)
if err != nil {
return nil, xerrors.Errorf("nps - good: %w", err)
}
skipCount, err := skipped.Count()
@ -405,7 +398,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
return nil, xerrors.Errorf("getting skipped sector count: %w", err)
}
ssi, err := s.sortedSectorInfo(ctx, nps, ts)
ssi, err := s.sortedSectorInfo(ctx, good, ts)
if err != nil {
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
}
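
The runPost hunk above flattens an inner block so that `good` (the sectors that passed checkSectors) is declared at function scope: it is needed both for computing `skipped` via bitfield subtraction and, further down, as the argument to sortedSectorInfo. A small, purely illustrative Go sketch of that scoping point follows; the names are hypothetical and unrelated to lotus.

package main

import "fmt"

// checkItems stands in for checkSectors: it returns the subset that passes a check.
func checkItems(all []int) ([]int, error) {
	var good []int
	for _, n := range all {
		if n%2 == 0 { // arbitrary "provable" condition for the sketch
			good = append(good, n)
		}
	}
	return good, nil
}

func run(all []int) error {
	// Declaring good here, rather than inside a nested block, keeps it visible
	// for every later use in the function.
	good, err := checkItems(all)
	if err != nil {
		return fmt.Errorf("checking items: %w", err)
	}

	skipped := len(all) - len(good) // analogous to subtracting the bitfields
	fmt.Println("good:", good, "skipped:", skipped)

	// ...and good remains available for later steps, as with sortedSectorInfo.
	return nil
}

func main() {
	if err := run([]int{1, 2, 3, 4, 5}); err != nil {
		fmt.Println(err)
	}
}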