package config

import (
	"time"

	hraft "github.com/hashicorp/raft"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/storage/sealer"
)

// // NOTE: ONLY PUT STRUCT DEFINITIONS IN THIS FILE
// //
// // After making edits here, run 'make cfgdoc-gen' (or 'make gen')

// Common is common config between full node and miner
type Common struct {
	API     API
	Backup  Backup
	Logging Logging
	Libp2p  Libp2p
	Pubsub  Pubsub
}

// FullNode is a full node config
type FullNode struct {
	Common
	Client     Client
	Wallet     Wallet
	Fees       FeeConfig
	Chainstore Chainstore
	Raft       ClusterRaftConfig
}

// // Common

type Backup struct {
	// When set to true, disables the metadata log (.lotus/kvlog). This can save disk
	// space by reducing metadata redundancy.
	//
	// Note that in case of metadata corruption it might be much harder to recover
	// your node if the metadata log is disabled
	DisableMetadataLog bool
}

// Logging is the logging system config
type Logging struct {
	// SubsystemLevels specifies per-subsystem log levels
	SubsystemLevels map[string]string
}
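
// Illustrative config.toml fragment (a sketch; this struct is assumed to map
// to the [Logging] section of the node config, and the subsystem names and
// levels shown are hypothetical examples):
//
//	[Logging.SubsystemLevels]
//	chain = "debug"
//	dht = "error"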

// StorageMiner is a miner config
type StorageMiner struct {
	Common

	Subsystems    MinerSubsystemConfig
	Dealmaking    DealmakingConfig
	IndexProvider IndexProviderConfig
	Proving       ProvingConfig
	Sealing       SealingConfig
	Storage       SealerConfig
	Fees          MinerFeeConfig
	Addresses     MinerAddressConfig
	DAGStore      DAGStoreConfig
}

type DAGStoreConfig struct {
	// Path to the dagstore root directory. This directory contains three
	// subdirectories, which can be symlinked to alternative locations if
	// need be:
	//  - ./transients: caches unsealed deals that have been fetched from the
	//    storage subsystem for serving retrievals.
	//  - ./indices: stores shard indices.
	//  - ./datastore: holds the KV store tracking the state of every shard
	//    known to the DAG store.
	// Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
	// <LOTUS_MINER_PATH>/dagstore (monolith deployment)
	RootDir string

	// The maximum number of indexing jobs that can run simultaneously.
	// 0 means unlimited.
	// Default value: 5.
	MaxConcurrentIndex int

	// The maximum number of unsealed deals that can be fetched simultaneously
	// from the storage subsystem. 0 means unlimited.
	// Default value: 0 (unlimited).
	MaxConcurrentReadyFetches int

	// The maximum number of unseals that can be processed simultaneously
	// from the storage subsystem. 0 means unlimited.
	// Default value: 0 (unlimited).
	MaxConcurrentUnseals int

	// The maximum number of simultaneous inflight API calls to the storage
	// subsystem.
	// Default value: 100.
	MaxConcurrencyStorageCalls int

	// The time between calls to periodic dagstore GC, in time.Duration string
	// representation, e.g. 1m, 5m, 1h.
	// Default value: 1 minute.
	GCInterval Duration
}
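
// Illustrative config.toml fragment (a sketch; assumed to map to the
// [DAGStore] section of the miner config, values are examples rather than
// recommendations):
//
//	[DAGStore]
//	MaxConcurrentIndex = 5
//	MaxConcurrentUnseals = 2
//	GCInterval = "1m0s"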

type MinerSubsystemConfig struct {
	EnableMining        bool
	EnableSealing       bool
	EnableSectorStorage bool
	EnableMarkets       bool

	SealerApiInfo      string // if EnableSealing == false
	SectorIndexApiInfo string // if EnableSectorStorage == false
}
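
// Illustrative config.toml fragment for a split markets/sealing deployment
// (a sketch; the [Subsystems] section name follows the StorageMiner field
// above, and the API info strings are placeholder token:multiaddress pairs):
//
//	[Subsystems]
//	EnableMining = false
//	EnableSealing = false
//	EnableSectorStorage = false
//	EnableMarkets = true
//	SealerApiInfo = "<api-token>:/ip4/127.0.0.1/tcp/2345/http"
//	SectorIndexApiInfo = "<api-token>:/ip4/127.0.0.1/tcp/2345/http"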

type DealmakingConfig struct {
	// When enabled, the miner can accept online deals
	ConsiderOnlineStorageDeals bool
	// When enabled, the miner can accept offline deals
	ConsiderOfflineStorageDeals bool
	// When enabled, the miner can accept retrieval deals
	ConsiderOnlineRetrievalDeals bool
	// When enabled, the miner can accept offline retrieval deals
	ConsiderOfflineRetrievalDeals bool
	// When enabled, the miner can accept verified deals
	ConsiderVerifiedStorageDeals bool
	// When enabled, the miner can accept unverified deals
	ConsiderUnverifiedStorageDeals bool
	// A list of Data CIDs to reject when making deals
	PieceCidBlocklist []cid.Cid
	// Maximum expected amount of time that getting the deal into a sealed sector will take.
	// This includes the time the deal will need to get transferred and published
	// before being assigned to a sector
	ExpectedSealDuration Duration
	// Maximum amount of time the proposed deal StartEpoch can be in the future
	MaxDealStartDelay Duration
	// When a deal is ready to publish, the amount of time to wait for more
	// deals to be ready to publish before publishing them all as a batch
	PublishMsgPeriod Duration
	// The maximum number of deals to include in a single PublishStorageDeals
	// message
	MaxDealsPerPublishMsg uint64
	// The maximum collateral that the provider will put up against a deal,
	// as a multiplier of the minimum collateral bound
	MaxProviderCollateralMultiplier uint64
	// The maximum allowed disk usage size in bytes of staging deals not yet
	// passed to the sealing node by the markets service. 0 is unlimited.
	MaxStagingDealsBytes int64
	// The maximum number of parallel online data transfers for storage deals
	SimultaneousTransfersForStorage uint64
	// The maximum number of simultaneous data transfers from any single client
	// for storage deals.
	// Unset by default (0), and values higher than SimultaneousTransfersForStorage
	// will have no effect; i.e. the total number of simultaneous data transfers
	// across all storage clients is bound by SimultaneousTransfersForStorage
	// regardless of this number.
	SimultaneousTransfersForStoragePerClient uint64
	// The maximum number of parallel online data transfers for retrieval deals
	SimultaneousTransfersForRetrieval uint64
	// Minimum start epoch buffer to give time for sealing of sector with deal.
	StartEpochSealingBuffer uint64

	// A command used for fine-grained evaluation of storage deals
	// see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
	Filter string
	// A command used for fine-grained evaluation of retrieval deals
	// see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
	RetrievalFilter string

	RetrievalPricing *RetrievalPricing
}
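
// Illustrative config.toml fragment (a sketch; assumed [Dealmaking] section,
// values are hypothetical examples rather than defaults, and the filter
// script path is a placeholder):
//
//	[Dealmaking]
//	ConsiderOnlineStorageDeals = true
//	ConsiderVerifiedStorageDeals = true
//	ExpectedSealDuration = "24h0m0s"
//	MaxStagingDealsBytes = 34359738368 # 32 GiB
//	Filter = "/absolute/path/to/deal-filter.sh"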

type IndexProviderConfig struct {
	// Enable sets whether to enable indexing announcement to the network and expose endpoints that
	// allow indexer nodes to process announcements. Enabled by default.
	Enable bool

	// EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
	// entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
	// maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
	// the length of multihashes being advertised. For example, advertising 128-bit long multihashes
	// with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to
	// 256MiB when full.
	EntriesCacheCapacity int

	// EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
	// Defaults to 16384 if not specified. Note that chunks are chained together for indexing
	// advertisements that include more multihashes than the configured EntriesChunkSize.
	EntriesChunkSize int

	// TopicName sets the topic name on which the changes to the advertised content are announced.
	// If not explicitly specified, the topic name is automatically inferred from the network name
	// in the following format: '/indexer/ingest/<network-name>'
	// Defaults to empty, which implies the topic name is inferred from the network name.
	TopicName string

	// PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
	// starts. By default, the cache is rehydrated from previously cached entries stored in
	// datastore if any is present.
	PurgeCacheOnStart bool
}
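
// Worked sizing example for the cache bound described in EntriesCacheCapacity
// above: 1024 cached chunks x 16384 multihashes per chunk x 16 bytes per
// 128-bit multihash = 268435456 bytes = 256MiB.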

type RetrievalPricing struct {
	Strategy string // possible values: "default", "external"

	Default  *RetrievalPricingDefault
	External *RetrievalPricingExternal
}

type RetrievalPricingExternal struct {
	// Path of the external script that will be run to price a retrieval deal.
	// This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".
	Path string
}

type RetrievalPricingDefault struct {
	// VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal
	// of a payloadCid that belongs to a verified storage deal.
	// This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default".
	// default value is true
	VerifiedDealsFreeTransfer bool
}
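
// Illustrative config.toml fragment (a sketch; the nested
// [Dealmaking.RetrievalPricing] section names are assumed from the struct
// layout above, and the script path is a placeholder):
//
//	[Dealmaking.RetrievalPricing]
//	Strategy = "external"
//
//	[Dealmaking.RetrievalPricing.External]
//	Path = "/absolute/path/to/pricing-script.sh"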

type ProvingConfig struct {
	// Maximum number of sector checks to run in parallel. (0 = unlimited)
	//
	// WARNING: Setting this value too high may make the node crash by running out of stack
	// WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
	// to late submission.
	//
	// After changing this option, confirm that the new value works in your setup by invoking
	// 'lotus-miner proving compute window-post 0'
	ParallelCheckLimit int

	// Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
	//
	// WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
	// to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
	//
	// After changing this option, confirm that the new value works in your setup by invoking
	// 'lotus-miner proving compute window-post 0'
	DisableBuiltinWindowPoSt bool

	// Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
	//
	// WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
	// Before enabling this option, make sure your PoSt workers work correctly.
	DisableBuiltinWinningPoSt bool

	// Disable WindowPoSt provable sector readability checks.
	//
	// In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
	// from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
	// we're only interested in checking that sector data can be read.
	//
	// When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
	// can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
	// the builtin logic not skipping snark computation when some sectors need to be skipped.
	//
	// When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
	// if challenges for some sectors aren't readable, those sectors will just get skipped.
	//
	// Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
	// time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
	// be negligible.
	//
	// NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
	//
	// NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
	// sent to the chain
	//
	// After changing this option, confirm that the new value works in your setup by invoking
	// 'lotus-miner proving compute window-post 0'
	DisableWDPoStPreChecks bool

	// Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
	//
	// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
	//
	// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
	// means that a single message can prove at most 10 partitions
	//
	// In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
	// too high to fit in the block gas limit; in those cases it may be necessary to set this value to something lower
	// than 10; note that setting this value lower may result in less efficient gas use - more messages will be sent
	// to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
	//
	// Setting this value above the network limit has no effect
	MaxPartitionsPerPoStMessage int

	// Maximum number of partitions to declare in a single DeclareFaultsRecovered message. 0 = no limit.
	//
	// In some cases when submitting DeclareFaultsRecovered messages,
	// there may be too many recoveries to fit in a BlockGasLimit.
	// In those cases it may be necessary to set this value to something low (eg 1);
	// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
	// resulting in more total gas use (but each message will have a lower gas limit)
	MaxPartitionsPerRecoveryMessage int
}
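
// Illustrative config.toml fragment (a sketch; assumed [Proving] section,
// values are examples rather than recommendations):
//
//	[Proving]
//	ParallelCheckLimit = 128
//	MaxPartitionsPerPoStMessage = 2
//	MaxPartitionsPerRecoveryMessage = 1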

type SealingConfig struct {
	// Upper bound on how many sectors can be waiting for more deals to be packed into them before they begin sealing, at any given time.
	// If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created.
	// If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel
	// Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency
	// 0 = no limit
	MaxWaitDealsSectors uint64

	// Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)
	MaxSealingSectors uint64

	// Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)
	MaxSealingSectorsForDeals uint64

	// Prefer creating new sectors even if there are sectors Available for upgrading.
	// This setting combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals makes it
	// possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing
	// flow when the volume of storage deals is lower.
	PreferNewSectorsForDeals bool

	// Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)
	MaxUpgradingSectors uint64

	// CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
	// live before it must be extended or converted into a sector containing deals before it is
	// terminated. Value must be between 180-540 days inclusive
	CommittedCapacitySectorLifetime Duration

	// Period of time that a newly created sector will wait for more deals to be packed into it before it starts to seal.
	// Sectors which are fully filled will start sealing immediately
	WaitDealsDelay Duration

	// Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
	// avoid the relatively high cost of unsealing the data later, at the cost of more storage space
	AlwaysKeepUnsealedCopy bool

	// Run sector finalization before submitting sector proof to the chain
	FinalizeEarly bool

	// Whether new sectors are created to pack incoming deals
	// When this is set to false no new sectors will be created for sealing incoming deals
	// This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade
	MakeNewSectorForDeals bool

	// After sealing CC sectors, make them available for upgrading with deals
	MakeCCSectorsAvailable bool

	// Whether to use available miner balance for sector collateral instead of sending it with each message
	CollateralFromMinerBalance bool
	// Minimum available balance to keep in the miner actor before sending it with messages
	AvailableBalanceBuffer types.FIL
	// Don't send collateral with messages even if there is no available balance in the miner actor
	DisableCollateralFallback bool

	// enable / disable precommit batching (takes effect after nv13)
	BatchPreCommits bool
	// maximum precommit batch size - batches will be sent immediately above this size
	MaxPreCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
	PreCommitBatchWait Duration
	// time buffer for forceful batch submission before sectors/deal in batch would start expiring
	PreCommitBatchSlack Duration

	// enable / disable commit aggregation (takes effect after nv13)
	AggregateCommits bool
	// minimum batched commit size - batches above this size will eventually be sent on a timeout
	MinCommitBatch int
	// maximum batched commit size - batches will be sent immediately above this size
	MaxCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
	CommitBatchWait Duration
	// time buffer for forceful batch submission before sectors/deals in batch would start expiring
	CommitBatchSlack Duration

	// network BaseFee below which to stop doing precommit batching, instead
	// sending precommit messages to the chain individually
	BatchPreCommitAboveBaseFee types.FIL

	// network BaseFee below which to stop doing commit aggregation, instead
	// submitting proofs to the chain individually
	AggregateAboveBaseFee types.FIL

	TerminateBatchMax  uint64
	TerminateBatchMin  uint64
	TerminateBatchWait Duration

	// Keep this many sectors in sealing pipeline, start CC if needed
	// todo TargetSealingSectors uint64

	// todo TargetSectors - stop auto-pledging new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above
}
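
// Illustrative config.toml fragment (a sketch; assumed [Sealing] section,
// values are hypothetical examples rather than defaults):
//
//	[Sealing]
//	MaxWaitDealsSectors = 2
//	MaxSealingSectorsForDeals = 8
//	WaitDealsDelay = "6h0m0s"
//	AggregateCommits = true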

type SealerConfig struct {
	ParallelFetchLimit int

	// Local worker config
	AllowAddPiece            bool
	AllowPreCommit1          bool
	AllowPreCommit2          bool
	AllowCommit              bool
	AllowUnseal              bool
	AllowReplicaUpdate       bool
	AllowProveReplicaUpdate2 bool
	AllowRegenSectorKey      bool

	// LocalWorkerName specifies a custom name for the builtin worker.
	// If set to an empty string (default) the OS hostname will be used
	LocalWorkerName string

	// Assigner specifies the worker assigner to use when scheduling tasks.
	// "utilization" (default) - assign tasks to workers with lowest utilization.
	// "spread" - assign tasks to as many distinct workers as possible.
	Assigner string

	// DisallowRemoteFinalize when set to true will force all Finalize tasks to
	// run on workers with local access to both long-term storage and the sealing
	// path containing the sector.
	// --
	// WARNING: Only set this if all workers have access to long-term storage
	// paths. If this flag is enabled, and there are workers without long-term
	// storage access, sectors will not be moved from them, and Finalize tasks
	// will appear to be stuck.
	// --
	// If you see stuck Finalize tasks after enabling this setting, check
	// 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
	DisallowRemoteFinalize bool

	// ResourceFiltering instructs the system which resource filtering strategy
	// to use when evaluating tasks against this worker. An empty value defaults
	// to "hardware".
	ResourceFiltering sealer.ResourceFilteringStrategy
}
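
// Illustrative config.toml fragment (a sketch; the [Storage] section name is
// assumed from the StorageMiner field above, values are examples):
//
//	[Storage]
//	ParallelFetchLimit = 10
//	AllowAddPiece = true
//	Assigner = "spread"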

type BatchFeeConfig struct {
	Base      types.FIL
	PerSector types.FIL
}

type MinerFeeConfig struct {
	MaxPreCommitGasFee types.FIL
	MaxCommitGasFee    types.FIL

	// maxBatchFee = maxBase + maxPerSector * nSectors
	MaxPreCommitBatchGasFee BatchFeeConfig
	MaxCommitBatchGasFee    BatchFeeConfig

	MaxTerminateGasFee types.FIL
	// WindowPoSt is a high-value operation, so the default fee should be high.
	MaxWindowPoStGasFee    types.FIL
	MaxPublishDealsFee     types.FIL
	MaxMarketBalanceAddFee types.FIL
}
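
// Worked example of the batch fee bound described by the comment above
// (hypothetical values, not defaults): with Base = 0 FIL and
// PerSector = 0.02 FIL, a batch of 100 sectors is capped at
// maxBatchFee = 0 + 0.02 * 100 = 2 FIL for the whole batch message.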

type MinerAddressConfig struct {
	// Addresses to send PreCommit messages from
	PreCommitControl []string
	// Addresses to send Commit messages from
	CommitControl      []string
	TerminateControl   []string
	DealPublishControl []string

	// DisableOwnerFallback disables usage of the owner address for messages
	// sent automatically
	DisableOwnerFallback bool
	// DisableWorkerFallback disables usage of the worker address for messages
	// sent automatically, if control addresses are configured.
	// A control address that doesn't have enough funds will still be chosen
	// over the worker address if this flag is set.
	DisableWorkerFallback bool
}

// API contains configs for API endpoint
type API struct {
	// Binding address for the Lotus API
	ListenAddress       string
	RemoteListenAddress string
	Timeout             Duration
}
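
// Illustrative config.toml fragment (a sketch; assumed [API] section, the
// multiaddress and timeout shown are examples):
//
//	[API]
//	ListenAddress = "/ip4/127.0.0.1/tcp/1234/http"
//	Timeout = "30s"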

// Libp2p contains configs for libp2p
type Libp2p struct {
	// Binding address for the libp2p host - 0 means random port.
	// Format: multiaddress; see https://multiformats.io/multiaddr/
	ListenAddresses []string
	// Addresses to explicitly announce to other peers. If not specified,
	// all interface addresses are announced
	// Format: multiaddress
	AnnounceAddresses []string
	// Addresses to not announce
	// Format: multiaddress
	NoAnnounceAddresses []string
	BootstrapPeers      []string
	ProtectedPeers      []string

	// When not disabled (default), lotus asks NAT devices (e.g., routers), to
	// open up an external port and forward it to the port lotus is running on.
	// When this works (i.e., when your router supports NAT port forwarding),
	// it makes the local lotus node accessible from the public internet
	DisableNatPortMap bool

	// ConnMgrLow is the number of connections that the basic connection manager
	// will trim down to.
	ConnMgrLow uint
	// ConnMgrHigh is the number of connections that, when exceeded, will trigger
	// a connection GC operation. Note: protected/recently formed connections don't
	// count towards this limit.
	ConnMgrHigh uint
	// ConnMgrGrace is a time duration that new connections are immune from being
	// closed by the connection manager.
	ConnMgrGrace Duration
}

type Pubsub struct {
	// Run the node in bootstrap-node mode
	Bootstrapper bool
	// DirectPeers specifies peers with direct peering agreements. These peers are
	// connected outside of the mesh, with all (valid) messages unconditionally
	// forwarded to them. The router will maintain open connections to these peers.
	// Note that the peering agreement should be reciprocal with direct peers
	// symmetrically configured at both ends.
	// Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K...
	DirectPeers           []string
	IPColocationWhitelist []string
	RemoteTracer          string
}

type Chainstore struct {
	EnableSplitstore bool
	Splitstore       Splitstore
}

type Splitstore struct {
	// ColdStoreType specifies the type of the coldstore.
	// It can be "universal" (default) or "discard" for discarding cold blocks.
	ColdStoreType string
	// HotStoreType specifies the type of the hotstore.
	// Only currently supported value is "badger".
	HotStoreType string
	// MarkSetType specifies the type of the markset.
	// It can be "map" for in memory marking or "badger" (default) for on-disk marking.
	MarkSetType string

	// HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond
	// the compaction boundary; default is 0.
	HotStoreMessageRetention uint64
	// HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore.
	// A value of 0 disables, while a value of 1 will do full GC in every compaction.
	// Default is 20 (about once a week).
	HotStoreFullGCFrequency uint64

	// EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning;
	// where hotstore compaction occurs every finality epochs, pruning happens every 3 finalities
	// Default is false
	EnableColdStoreAutoPrune bool

	// ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
	// Only applies if auto prune is enabled. A value of 0 disables, while a value of 1 will do
	// full GC in every prune.
	// Default is 7 (about once a week)
	ColdStoreFullGCFrequency uint64

	// ColdStoreRetention specifies the retention policy for data reachable from the chain, in
	// finalities beyond the compaction boundary, default is 0, -1 retains everything
	ColdStoreRetention int64
}
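
// Illustrative config.toml fragment (a sketch; the nested
// [Chainstore.Splitstore] section is assumed from the struct layout above):
//
//	[Chainstore]
//	EnableSplitstore = true
//
//	[Chainstore.Splitstore]
//	ColdStoreType = "discard"
//	MarkSetType = "badger"
//	HotStoreFullGCFrequency = 20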

// // Full Node

type Client struct {
	UseIpfs             bool
	IpfsOnlineMode      bool
	IpfsMAddr           string
	IpfsUseForRetrieval bool
	// The maximum number of simultaneous data transfers between the client
	// and storage providers for storage deals
	SimultaneousTransfersForStorage uint64
	// The maximum number of simultaneous data transfers between the client
	// and storage providers for retrieval deals
	SimultaneousTransfersForRetrieval uint64

	// Require that retrievals perform no on-chain operations. Paid retrievals
	// without existing payment channels with available funds will fail instead
	// of automatically performing on-chain operations.
	OffChainRetrieval bool
}

type Wallet struct {
	RemoteBackend string
	EnableLedger  bool
	DisableLocal  bool
}

type FeeConfig struct {
	DefaultMaxFee types.FIL
}

// ClusterRaftConfig allows configuring the Raft Consensus component for the node cluster.
type ClusterRaftConfig struct {
	// config to enable node cluster with raft consensus
	ClusterModeEnabled bool
	// will shut down the libp2p host on shutdown. Useful for testing
	HostShutdown bool
	// A folder to store Raft's data.
	DataFolder string
	// InitPeerset provides the list of initial cluster peers for new Raft
	// peers (with no prior state). It is ignored when Raft was already
	// initialized or when starting in staging mode.
	InitPeerset []peer.ID
	// WaitForLeaderTimeout specifies how long to wait for a leader before
	// failing an operation.
	WaitForLeaderTimeout time.Duration
	// NetworkTimeout specifies how long before a Raft network
	// operation is timed out
	NetworkTimeout time.Duration
	// CommitRetries specifies how many times we retry a failed commit until
	// we give up.
	CommitRetries int
	// How long to wait between retries
	CommitRetryDelay time.Duration
	// BackupsRotate specifies the maximum number of Raft's DataFolder
	// copies that we keep as backups (renaming) after cleanup.
	BackupsRotate int
	// Namespace to use when writing keys to the datastore
	DatastoreNamespace string

	// A Hashicorp Raft's configuration object.
	RaftConfig *hraft.Config

	// Tracing enables propagation of contexts across binary boundaries.
	Tracing bool
}
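
// Illustrative config.toml fragment (a sketch; the [Raft] section name is
// assumed from the FullNode field above, and the data folder path is a
// placeholder):
//
//	[Raft]
//	ClusterModeEnabled = true
//	DataFolder = "/path/to/raft-data"
//	CommitRetries = 2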