lotus/node/config/doc_gen.go

// Code generated by github.com/filecoin-project/lotus/node/config/cfgdocgen. DO NOT EDIT.
package config
type DocField struct {
Name string
Type string
Comment string
}
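// findDocComment is an illustrative sketch, not part of the generated output:
// it shows how the Doc map below can be consulted to fetch the documentation
// for a single config field. The helper name findDocComment is assumed here
// purely for illustration; for example, findDocComment("API", "ListenAddress")
// would return "Binding address for the Lotus API".
func findDocComment(typ, field string) string {
	for _, f := range Doc[typ] {
		if f.Name == field {
			return f.Comment
		}
	}
	return ""
}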
var Doc = map[string][]DocField{
"API": []DocField{
{
Name: "ListenAddress",
Type: "string",
Comment: `Binding address for the Lotus API`,
},
{
Name: "RemoteListenAddress",
Type: "string",
Comment: ``,
},
{
Name: "Timeout",
Type: "Duration",
Comment: ``,
},
},
"Backup": []DocField{
{
Name: "DisableMetadataLog",
Type: "bool",
Comment: `When set to true disables metadata log (.lotus/kvlog). This can save disk
space by reducing metadata redundancy.
Note that in case of metadata corruption it might be much harder to recover
your node if metadata log is disabled`,
},
},
"BatchFeeConfig": []DocField{
{
Name: "Base",
Type: "types.FIL",
Comment: ``,
},
{
Name: "PerSector",
Type: "types.FIL",
Comment: ``,
},
},
"Chainstore": []DocField{
{
Name: "EnableSplitstore",
Type: "bool",
Comment: ``,
},
{
Name: "Splitstore",
Type: "Splitstore",
Comment: ``,
},
},
"Client": []DocField{
{
Name: "UseIpfs",
Type: "bool",
Comment: ``,
},
{
Name: "IpfsOnlineMode",
Type: "bool",
Comment: ``,
},
{
Name: "IpfsMAddr",
Type: "string",
Comment: ``,
},
{
Name: "IpfsUseForRetrieval",
Type: "bool",
Comment: ``,
},
{
Name: "SimultaneousTransfersForStorage",
Type: "uint64",
Comment: `The maximum number of simultaneous data transfers between the client
and storage providers for storage deals`,
},
{
Name: "SimultaneousTransfersForRetrieval",
Type: "uint64",
Comment: `The maximum number of simultaneous data transfers between the client
and storage providers for retrieval deals`,
},
{
Name: "OffChainRetrieval",
Type: "bool",
Comment: `Require that retrievals perform no on-chain operations. Paid retrievals
without existing payment channels with available funds will fail instead
of automatically performing on-chain operations.`,
},
},
"Common": []DocField{
{
Name: "API",
Type: "API",
Comment: ``,
},
{
Name: "Backup",
Type: "Backup",
Comment: ``,
},
{
Name: "Logging",
Type: "Logging",
Comment: ``,
},
{
Name: "Libp2p",
Type: "Libp2p",
Comment: ``,
},
{
Name: "Pubsub",
Type: "Pubsub",
Comment: ``,
},
},
"DAGStoreConfig": []DocField{
{
Name: "RootDir",
Type: "string",
Comment: `Path to the dagstore root directory. This directory contains three
subdirectories, which can be symlinked to alternative locations if
need be:
- ./transients: caches unsealed deals that have been fetched from the
storage subsystem for serving retrievals.
- ./indices: stores shard indices.
- ./datastore: holds the KV store tracking the state of every shard
known to the DAG store.
Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
<LOTUS_MINER_PATH>/dagstore (monolith deployment)`,
},
{
Name: "MaxConcurrentIndex",
Type: "int",
Comment: `The maximum amount of indexing jobs that can run simultaneously.
0 means unlimited.
Default value: 5.`,
},
{
Name: "MaxConcurrentReadyFetches",
Type: "int",
Comment: `The maximum amount of unsealed deals that can be fetched simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 0 (unlimited).`,
},
{
Name: "MaxConcurrentUnseals",
Type: "int",
Comment: `The maximum amount of unseals that can be processed simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 0 (unlimited).`,
},
{
Name: "MaxConcurrencyStorageCalls",
Type: "int",
Comment: `The maximum number of simultaneous inflight API calls to the storage
subsystem.
Default value: 100.`,
},
{
Name: "GCInterval",
Type: "Duration",
Comment: `The time between calls to periodic dagstore GC, in time.Duration string
representation, e.g. 1m, 5m, 1h.
Default value: 1 minute.`,
},
},
"DealmakingConfig": []DocField{
{
Name: "ConsiderOnlineStorageDeals",
Type: "bool",
Comment: `When enabled, the miner can accept online deals`,
},
{
Name: "ConsiderOfflineStorageDeals",
Type: "bool",
Comment: `When enabled, the miner can accept offline deals`,
},
{
Name: "ConsiderOnlineRetrievalDeals",
Type: "bool",
Comment: `When enabled, the miner can accept retrieval deals`,
},
{
Name: "ConsiderOfflineRetrievalDeals",
Type: "bool",
Comment: `When enabled, the miner can accept offline retrieval deals`,
},
{
Name: "ConsiderVerifiedStorageDeals",
Type: "bool",
Comment: `When enabled, the miner can accept verified deals`,
},
{
Name: "ConsiderUnverifiedStorageDeals",
Type: "bool",
Comment: `When enabled, the miner can accept unverified deals`,
},
{
Name: "PieceCidBlocklist",
Type: "[]cid.Cid",
Comment: `A list of Data CIDs to reject when making deals`,
},
{
Name: "ExpectedSealDuration",
Type: "Duration",
Comment: `Maximum expected amount of time getting the deal into a sealed sector will take
This includes the time the deal will need to get transferred and published
before being assigned to a sector`,
},
{
Name: "MaxDealStartDelay",
Type: "Duration",
Comment: `Maximum amount of time proposed deal StartEpoch can be in future`,
},
{
Name: "PublishMsgPeriod",
Type: "Duration",
Comment: `When a deal is ready to publish, the amount of time to wait for more
deals to be ready to publish before publishing them all as a batch`,
},
{
Name: "MaxDealsPerPublishMsg",
Type: "uint64",
Comment: `The maximum number of deals to include in a single PublishStorageDeals
message`,
},
{
Name: "MaxProviderCollateralMultiplier",
Type: "uint64",
Comment: `The maximum collateral that the provider will put up against a deal,
as a multiplier of the minimum collateral bound`,
},
{
Name: "MaxStagingDealsBytes",
Type: "int64",
Comment: `The maximum allowed disk usage size in bytes of staging deals not yet
passed to the sealing node by the markets service. 0 is unlimited.`,
},
{
Name: "SimultaneousTransfersForStorage",
Type: "uint64",
Comment: `The maximum number of parallel online data transfers for storage deals`,
},
{
Name: "SimultaneousTransfersForStoragePerClient",
Type: "uint64",
Comment: `The maximum number of simultaneous data transfers from any single client
for storage deals.
Unset by default (0), and values higher than SimultaneousTransfersForStorage
will have no effect; i.e. the total number of simultaneous data transfers
across all storage clients is bound by SimultaneousTransfersForStorage
regardless of this number.`,
},
{
Name: "SimultaneousTransfersForRetrieval",
Type: "uint64",
Comment: `The maximum number of parallel online data transfers for retrieval deals`,
},
{
Name: "StartEpochSealingBuffer",
Type: "uint64",
Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`,
},
{
Name: "Filter",
Type: "string",
Comment: `A command used for fine-grained evaluation of storage deals
see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`,
},
{
Name: "RetrievalFilter",
Type: "string",
Comment: `A command used for fine-grained evaluation of retrieval deals
see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`,
},
{
Name: "RetrievalPricing",
Type: "*RetrievalPricing",
Comment: ``,
},
},
"FeeConfig": []DocField{
{
Name: "DefaultMaxFee",
Type: "types.FIL",
Comment: ``,
},
},
"FullNode": []DocField{
{
Name: "Client",
Type: "Client",
Comment: ``,
},
{
Name: "Wallet",
Type: "Wallet",
Comment: ``,
},
{
Name: "Fees",
Type: "FeeConfig",
Comment: ``,
},
{
Name: "Chainstore",
Type: "Chainstore",
Comment: ``,
},
{
Name: "Raft",
Type: "UserRaftConfig",
Comment: ``,
},
},
"IndexProviderConfig": []DocField{
{
Name: "Enable",
Type: "bool",
Comment: `Enable sets whether to enable indexing announcements to the network and expose endpoints that
allow indexer nodes to process announcements. Enabled by default.`,
},
{
Name: "EntriesCacheCapacity",
Type: "int",
Comment: `EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
the length of multihashes being advertised. For example, advertising 128-bit long multihashes
with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to
256MiB when full.`,
},
{
Name: "EntriesChunkSize",
Type: "int",
Comment: `EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
Defaults to 16384 if not specified. Note that chunks are chained together for indexing
advertisements that include more multihashes than the configured EntriesChunkSize.`,
},
{
Name: "TopicName",
Type: "string",
Comment: `TopicName sets the topic name on which the changes to the advertised content are announced.
If not explicitly specified, the topic name is automatically inferred from the network name
in the following format: '/indexer/ingest/<network-name>'
Defaults to empty, which implies the topic name is inferred from network name.`,
},
{
Name: "PurgeCacheOnStart",
Type: "bool",
Comment: `PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
starts. By default, the cache is rehydrated from previously cached entries stored in
datastore if any is present.`,
},
},
"Libp2p": []DocField{
{
Name: "ListenAddresses",
Type: "[]string",
Comment: `Binding address for the libp2p host - 0 means random port.
Format: multiaddress; see https://multiformats.io/multiaddr/`,
},
{
Name: "AnnounceAddresses",
Type: "[]string",
Comment: `Addresses to explicitly announce to other peers. If not specified,
all interface addresses are announced
Format: multiaddress`,
},
{
Name: "NoAnnounceAddresses",
Type: "[]string",
Comment: `Addresses to not announce
Format: multiaddress`,
},
{
Name: "BootstrapPeers",
Type: "[]string",
Comment: ``,
},
{
Name: "ProtectedPeers",
Type: "[]string",
Comment: ``,
},
{
Name: "DisableNatPortMap",
Type: "bool",
Comment: `When not disabled (default), lotus asks NAT devices (e.g., routers) to
open up an external port and forward it to the port lotus is running on.
When this works (i.e., when your router supports NAT port forwarding),
it makes the local lotus node accessible from the public internet`,
},
{
Name: "ConnMgrLow",
Type: "uint",
Comment: `ConnMgrLow is the number of connections that the basic connection manager
will trim down to.`,
},
{
Name: "ConnMgrHigh",
Type: "uint",
Comment: `ConnMgrHigh is the number of connections that, when exceeded, will trigger
a connection GC operation. Note: protected/recently formed connections don't
count towards this limit.`,
},
{
Name: "ConnMgrGrace",
Type: "Duration",
Comment: `ConnMgrGrace is a time duration that new connections are immune from being
closed by the connection manager.`,
},
},
"Logging": []DocField{
{
Name: "SubsystemLevels",
Type: "map[string]string",
Comment: `SubsystemLevels specify per-subsystem log levels`,
},
},
"MinerAddressConfig": []DocField{
{
Name: "PreCommitControl",
Type: "[]string",
Comment: `Addresses to send PreCommit messages from`,
},
{
Name: "CommitControl",
Type: "[]string",
Comment: `Addresses to send Commit messages from`,
},
{
Name: "TerminateControl",
Type: "[]string",
Comment: ``,
},
{
Name: "DealPublishControl",
Type: "[]string",
Comment: ``,
},
{
Name: "DisableOwnerFallback",
Type: "bool",
Comment: `DisableOwnerFallback disables usage of the owner address for messages
sent automatically`,
},
{
Name: "DisableWorkerFallback",
Type: "bool",
Comment: `DisableWorkerFallback disables usage of the worker address for messages
sent automatically, if control addresses are configured.
A control address that doesn't have enough funds will still be chosen
over the worker address if this flag is set.`,
},
},
"MinerFeeConfig": []DocField{
{
Name: "MaxPreCommitGasFee",
Type: "types.FIL",
Comment: ``,
},
{
Name: "MaxCommitGasFee",
Type: "types.FIL",
Comment: ``,
},
{
Name: "MaxPreCommitBatchGasFee",
Type: "BatchFeeConfig",
Comment: `maxBatchFee = maxBase + maxPerSector * nSectors`,
},
{
Name: "MaxCommitBatchGasFee",
Type: "BatchFeeConfig",
Comment: ``,
},
{
Name: "MaxTerminateGasFee",
Type: "types.FIL",
Comment: ``,
},
{
Name: "MaxWindowPoStGasFee",
Type: "types.FIL",
Comment: `WindowPoSt is a high-value operation, so the default fee should be high.`,
},
{
Name: "MaxPublishDealsFee",
Type: "types.FIL",
Comment: ``,
},
{
Name: "MaxMarketBalanceAddFee",
Type: "types.FIL",
Comment: ``,
},
},
"MinerSubsystemConfig": []DocField{
{
Name: "EnableMining",
Type: "bool",
Comment: ``,
},
{
Name: "EnableSealing",
Type: "bool",
Comment: ``,
},
{
Name: "EnableSectorStorage",
Type: "bool",
Comment: ``,
},
{
Name: "EnableMarkets",
Type: "bool",
Comment: ``,
},
{
Name: "SealerApiInfo",
Type: "string",
Comment: ``,
},
{
Name: "SectorIndexApiInfo",
Type: "string",
Comment: ``,
},
},
"ProvingConfig": []DocField{
{
Name: "ParallelCheckLimit",
Type: "int",
Comment: `Maximum number of sector checks to run in parallel. (0 = unlimited)
WARNING: Setting this value too high may make the node crash by running out of stack
WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
to late submission.
After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
},
{
Name: "DisableBuiltinWindowPoSt",
Type: "bool",
Comment: `Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
},
{
Name: "DisableBuiltinWinningPoSt",
Type: "bool",
Comment: `Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
Before enabling this option, make sure your PoSt workers work correctly.`,
},
{
Name: "DisableWDPoStPreChecks",
Type: "bool",
Comment: `Disable WindowPoSt provable sector readability checks.
In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
we're only interested in checking that sector data can be read.
When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
the builtin logic not skipping snark computation when some sectors need to be skipped.
When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
if challenges for some sectors aren't readable, those sectors will just get skipped.
Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
be negligible.
NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
sent to the chain
After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
},
{
Name: "MaxPartitionsPerPoStMessage",
Type: "int",
Comment: `Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
means that a single message can prove at most 10 partitions
In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
Setting this value above the network limit has no effect`,
},
{
Name: "MaxPartitionsPerRecoveryMessage",
Type: "int",
Comment: `In some cases when submitting DeclareFaultsRecovered messages,
there may be too many recoveries to fit in a BlockGasLimit.
In those cases it may be necessary to set this value to something low (eg 1);
Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
resulting in more total gas use (but each message will have lower gas limit)`,
},
},
"Pubsub": []DocField{
{
Name: "Bootstrapper",
Type: "bool",
Comment: `Run the node in bootstrap-node mode`,
},
{
Name: "DirectPeers",
Type: "[]string",
Comment: `DirectPeers specifies peers with direct peering agreements. These peers are
connected outside of the mesh, with all (valid) messages unconditionally
forwarded to them. The router will maintain open connections to these peers.
Note that the peering agreement should be reciprocal with direct peers
symmetrically configured at both ends.
Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K...`,
},
{
Name: "IPColocationWhitelist",
Type: "[]string",
Comment: ``,
},
{
Name: "RemoteTracer",
Type: "string",
Comment: ``,
},
},
"RetrievalPricing": []DocField{
{
Name: "Strategy",
Type: "string",
Comment: ``,
},
{
Name: "Default",
Type: "*RetrievalPricingDefault",
Comment: ``,
},
{
Name: "External",
Type: "*RetrievalPricingExternal",
Comment: ``,
},
},
"RetrievalPricingDefault": []DocField{
{
Name: "VerifiedDealsFreeTransfer",
Type: "bool",
Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal
of a payloadCid that belongs to a verified storage deal.
This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default".
default value is true`,
},
},
"RetrievalPricingExternal": []DocField{
{
Name: "Path",
Type: "string",
Comment: `Path of the external script that will be run to price a retrieval deal.
This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`,
},
},
"SealerConfig": []DocField{
{
Name: "ParallelFetchLimit",
Type: "int",
Comment: ``,
},
{
Name: "AllowSectorDownload",
Type: "bool",
Comment: ``,
},
{
Name: "AllowAddPiece",
Type: "bool",
Comment: ``,
},
{
Name: "AllowPreCommit1",
Type: "bool",
Comment: ``,
},
{
Name: "AllowPreCommit2",
Type: "bool",
Comment: ``,
},
{
Name: "AllowCommit",
Type: "bool",
Comment: ``,
},
{
Name: "AllowUnseal",
Type: "bool",
Comment: ``,
},
{
Name: "AllowReplicaUpdate",
Type: "bool",
Comment: ``,
},
{
Name: "AllowProveReplicaUpdate2",
Type: "bool",
Comment: ``,
},
{
Name: "AllowRegenSectorKey",
Type: "bool",
Comment: ``,
},
{
Name: "LocalWorkerName",
Type: "string",
Comment: `LocalWorkerName specifies a custom name for the builtin worker.
If set to an empty string (default), the OS hostname will be used`,
},
{
Name: "Assigner",
Type: "string",
Comment: `Assigner specifies the worker assigner to use when scheduling tasks.
"utilization" (default) - assign tasks to workers with lowest utilization.
"spread" - assign tasks to as many distinct workers as possible.`,
},
{
Name: "DisallowRemoteFinalize",
Type: "bool",
Comment: `DisallowRemoteFinalize when set to true will force all Finalize tasks to
run on workers with local access to both long-term storage and the sealing
path containing the sector.
--
WARNING: Only set this if all workers have access to long-term storage
paths. If this flag is enabled, and there are workers without long-term
storage access, sectors will not be moved from them, and Finalize tasks
will appear to be stuck.
--
If you see stuck Finalize tasks after enabling this setting, check
'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'`,
},
{
Name: "ResourceFiltering",
Type: "sealer.ResourceFilteringStrategy",
Comment: `ResourceFiltering instructs the system which resource filtering strategy
to use when evaluating tasks against this worker. An empty value defaults
to "hardware".`,
},
},
"SealingConfig": []DocField{
{
Name: "MaxWaitDealsSectors",
Type: "uint64",
Comment: `Upper bound on how many sectors can be waiting for more deals to be packed into them before they begin sealing at any given time.
If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created.
If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel
Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency
0 = no limit`,
},
{
Name: "MaxSealingSectors",
Type: "uint64",
Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)`,
},
{
Name: "MaxSealingSectorsForDeals",
Type: "uint64",
Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)`,
},
{
Name: "PreferNewSectorsForDeals",
Type: "bool",
Comment: `Prefer creating new sectors even if there are sectors Available for upgrading.
This setting combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals makes it
possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing
flow when the volume of storage deals is lower.`,
},
{
Name: "MaxUpgradingSectors",
Type: "uint64",
Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)`,
},
{
Name: "MinUpgradeSectorExpiration",
Type: "uint64",
Comment: `When set to a non-zero value, minimum number of epochs until sector expiration required for sectors to be considered
for upgrades (0 = DealMinDuration = 180 days = 518400 epochs)
Note that if all deals waiting in the input queue have lifetimes longer than this value, upgrade sectors will be
required to have expiration of at least the soonest-ending deal`,
},
{
Name: "MinTargetUpgradeSectorExpiration",
Type: "uint64",
Comment: `When set to a non-zero value, minimum number of epochs until sector expiration above which upgrade candidates will
be selected based on lowest initial pledge.
Target sector expiration is calculated by looking at the input deal queue, sorting it by deal expiration, and
selecting N deals from the queue up to sector size. The target expiration will be Nth deal end epoch, or in case
where there weren't enough deals to fill a sector, DealMaxDuration (540 days = 1555200 epochs)
Setting this to a high value (for example to maximum deal duration - 1555200) will disable selection based on
initial pledge - upgrade sectors will always be chosen based on longest expiration`,
},
{
Name: "CommittedCapacitySectorLifetime",
Type: "Duration",
Comment: `CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
live before it must be extended or converted into sector containing deals before it is
terminated. Value must be between 180-540 days inclusive`,
},
{
Name: "WaitDealsDelay",
Type: "Duration",
Comment: `Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal.
Sectors which are fully filled will start sealing immediately`,
},
{
Name: "AlwaysKeepUnsealedCopy",
Type: "bool",
Comment: `Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
avoid the relatively high cost of unsealing the data later, at the cost of more storage space`,
},
{
Name: "FinalizeEarly",
Type: "bool",
Comment: `Run sector finalization before submitting sector proof to the chain`,
},
{
Name: "MakeNewSectorForDeals",
Type: "bool",
Comment: `Whether new sectors are created to pack incoming deals
When this is set to false no new sectors will be created for sealing incoming deals
This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade`,
},
{
Name: "MakeCCSectorsAvailable",
Type: "bool",
Comment: `After sealing CC sectors, make them available for upgrading with deals`,
},
{
Name: "CollateralFromMinerBalance",
Type: "bool",
Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message`,
},
{
Name: "AvailableBalanceBuffer",
Type: "types.FIL",
Comment: `Minimum available balance to keep in the miner actor before sending it with messages`,
},
{
Name: "DisableCollateralFallback",
Type: "bool",
Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`,
},
{
Name: "BatchPreCommits",
Type: "bool",
Comment: `enable / disable precommit batching (takes effect after nv13)`,
},
{
Name: "MaxPreCommitBatch",
Type: "int",
Comment: `maximum precommit batch size - batches will be sent immediately above this size`,
},
{
Name: "PreCommitBatchWait",
Type: "Duration",
Comment: `how long to wait before submitting a batch after crossing the minimum batch size`,
},
{
Name: "PreCommitBatchSlack",
Type: "Duration",
Comment: `time buffer for forceful batch submission before sectors/deal in batch would start expiring`,
},
{
Name: "AggregateCommits",
Type: "bool",
Comment: `enable / disable commit aggregation (takes effect after nv13)`,
},
{
Name: "MinCommitBatch",
Type: "int",
Comment: `minimum batched commit size - batches above this size will eventually be sent on a timeout`,
},
{
Name: "MaxCommitBatch",
Type: "int",
Comment: `maximum batched commit size - batches will be sent immediately above this size`,
},
{
Name: "CommitBatchWait",
Type: "Duration",
Comment: `how long to wait before submitting a batch after crossing the minimum batch size`,
},
{
Name: "CommitBatchSlack",
Type: "Duration",
Comment: `time buffer for forceful batch submission before sectors/deals in batch would start expiring`,
},
{
Name: "BatchPreCommitAboveBaseFee",
Type: "types.FIL",
Comment: `network BaseFee below which to stop doing precommit batching, instead
sending precommit messages to the chain individually`,
},
{
Name: "AggregateAboveBaseFee",
Type: "types.FIL",
Comment: `network BaseFee below which to stop doing commit aggregation, instead
submitting proofs to the chain individually`,
},
{
Name: "TerminateBatchMax",
Type: "uint64",
Comment: ``,
},
{
Name: "TerminateBatchMin",
Type: "uint64",
Comment: ``,
},
{
Name: "TerminateBatchWait",
Type: "Duration",
Comment: ``,
},
},
"Splitstore": []DocField{
{
Name: "ColdStoreType",
Type: "string",
Comment: `ColdStoreType specifies the type of the coldstore.
It can be "universal" (default) or "discard" for discarding cold blocks.`,
},
{
Name: "HotStoreType",
Type: "string",
Comment: `HotStoreType specifies the type of the hotstore.
The only currently supported value is "badger".`,
},
{
Name: "MarkSetType",
Type: "string",
Comment: `MarkSetType specifies the type of the markset.
It can be "map" for in memory marking or "badger" (default) for on-disk marking.`,
},
{
Name: "HotStoreMessageRetention",
Type: "uint64",
Comment: `HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond
the compaction boundary; default is 0.`,
},
{
Name: "HotStoreFullGCFrequency",
Type: "uint64",
Comment: `HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore.
A value of 0 disables, while a value of 1 will do full GC in every compaction.
Default is 20 (about once a week).`,
},
{
Name: "EnableColdStoreAutoPrune",
Type: "bool",
Comment: `EnableColdStoreAutoPrune turns on compaction of the cold store, i.e. pruning.
Whereas hotstore compaction occurs every finality, pruning happens every 3 finalities.
Default is false`,
},
{
Name: "ColdStoreFullGCFrequency",
Type: "uint64",
Comment: `ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do
full GC in every prune.
Default is 7 (about once a week)`,
},
{
Name: "ColdStoreRetention",
Type: "int64",
Comment: `ColdStoreRetention specifies the retention policy for data reachable from the chain, in
finalities beyond the compaction boundary, default is 0, -1 retains everything`,
},
},
"StorageMiner": []DocField{
{
Name: "Subsystems",
Type: "MinerSubsystemConfig",
Comment: ``,
},
{
Name: "Dealmaking",
Type: "DealmakingConfig",
Comment: ``,
},
{
Name: "IndexProvider",
Type: "IndexProviderConfig",
Comment: ``,
},
{
Name: "Proving",
Type: "ProvingConfig",
Comment: ``,
},
{
Name: "Sealing",
Type: "SealingConfig",
Comment: ``,
},
{
Name: "Storage",
Type: "SealerConfig",
Comment: ``,
},
{
Name: "Fees",
Type: "MinerFeeConfig",
Comment: ``,
},
{
Name: "Addresses",
Type: "MinerAddressConfig",
Comment: ``,
},
{
Name: "DAGStore",
Type: "DAGStoreConfig",
Comment: ``,
},
},
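// The fields above correspond to top-level sections of a storage miner's
// config.toml. A minimal sketch of that layout follows (assuming the default
// TOML encoding, where each field becomes a section named after it; the
// empty bodies are placeholders, not defaults):
//
//   [Proving]
//   [Sealing]
//   [Storage]
//   [Fees]
//   [Addresses]
//   [DAGStore]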
"UserRaftConfig": []DocField{
{
Name: "ClusterModeEnabled",
Type: "bool",
Comment: `ClusterModeEnabled enables running this node as part of a cluster with Raft consensus`,
},
{
Name: "HostShutdown",
Type: "bool",
Comment: `HostShutdown shuts down the libp2p host on node shutdown. Useful for testing`,
},
{
Name: "DataFolder",
Type: "string",
Comment: `A folder to store Raft's data.`,
},
{
Name: "InitPeerset",
Type: "[]peer.ID",
Comment: `InitPeerset provides the list of initial cluster peers for new Raft
peers (with no prior state). It is ignored when Raft has already been
initialized or when starting in staging mode.`,
},
{
Name: "WaitForLeaderTimeout",
Type: "Duration",
Comment: `WaitForLeaderTimeout specifies how long to wait for a leader before
failing an operation.`,
},
{
Name: "NetworkTimeout",
Type: "Duration",
Comment: `NetworkTimeout specifies how long to wait before a Raft network
operation times out.`,
},
{
Name: "CommitRetries",
Type: "int",
Comment: `CommitRetries specifies how many times we retry a failed commit until
we give up.`,
},
{
Name: "CommitRetryDelay",
Type: "Duration",
Comment: `CommitRetryDelay specifies how long to wait between commit retries.`,
},
{
Name: "BackupsRotate",
Type: "int",
Comment: `BackupsRotate specifies the maximum number of copies of Raft's DataFolder
that are kept as backups (by renaming) after cleanup.`,
},
{
Name: "DatastoreNamespace",
Type: "string",
Comment: `Namespace to use when writing keys to the datastore`,
},
{
Name: "Tracing",
Type: "bool",
Comment: `Tracing enables propagation of contexts across binary boundaries.`,
},
},
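// Illustrative only: assuming UserRaftConfig surfaces as a [Cluster] section
// in the node's config.toml (the section name and every value below are
// assumptions, not defaults), the Duration fields are written as Go-style
// duration strings:
//
//   [Cluster]
//   ClusterModeEnabled = true
//   DataFolder = ""
//   WaitForLeaderTimeout = "15s"
//   NetworkTimeout = "1m40s"
//   CommitRetries = 1
//   CommitRetryDelay = "200ms"
//   BackupsRotate = 6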
"Wallet": []DocField{
{
Name: "RemoteBackend",
Type: "string",
Comment: ``,
},
{
Name: "EnableLedger",
Type: "bool",
Comment: ``,
},
{
Name: "DisableLocal",
Type: "bool",
Comment: ``,
},
},
}
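
// DocForField is a minimal, hypothetical sketch (not emitted by cfgdocgen)
// of how the Doc map above can be consumed: it looks up the generated
// documentation for a single field of a named config struct. The function
// name and signature are illustrative and not part of the generated API.
func DocForField(structName, fieldName string) (DocField, bool) {
	for _, f := range Doc[structName] {
		if f.Name == fieldName {
			return f, true
		}
	}
	return DocField{}, false
}

// Example usage (illustrative):
//
//	if d, ok := DocForField("UserRaftConfig", "CommitRetries"); ok {
//		fmt.Printf("%s (%s): %s\n", d.Name, d.Type, d.Comment)
//	}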