[API]
# Binding address for the Lotus API
#
# type: string
# env var: LOTUS_API_LISTENADDRESS
#ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
# type: string
# env var: LOTUS_API_REMOTELISTENADDRESS
#RemoteListenAddress = "127.0.0.1:2345"
# type: Duration
# env var: LOTUS_API_TIMEOUT
#Timeout = "30s"
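# Example (a hypothetical setup): to expose the API on all interfaces with a longer
# timeout, the section could be set to
#
#   ListenAddress = "/ip4/0.0.0.0/tcp/2345/http"
#   Timeout = "60s"
#
# If you do bind publicly, keep the API behind a firewall or reverse proxy. Each option
# can also be overridden with the environment variable listed above it, e.g. LOTUS_API_TIMEOUT=60s.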
[Backup]
# When set to true disables metadata log (.lotus/kvlog). This can save disk
# space by reducing metadata redundancy.
#
# Note that in case of metadata corruption it might be much harder to recover
# your node if the metadata log is disabled
#
# type: bool
# env var: LOTUS_BACKUP_DISABLEMETADATALOG
#DisableMetadataLog = true
[Logging]
[Logging.SubsystemLevels]
# env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM
#example-subsystem = "INFO"
[Libp2p]
# Binding address for the libp2p host - 0 means random port.
# Format: multiaddress; see https://multiformats.io/multiaddr/
#
# type: []string
# env var: LOTUS_LIBP2P_LISTENADDRESSES
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/udp/0/quic-v1/webtransport"]
# Addresses to explicitly announce to other peers. If not specified,
# all interface addresses are announced
# Format: multiaddress
#
# type: []string
# env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES
#AnnounceAddresses = []
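# Example (hypothetical values): a node behind a router that forwards public TCP port
# 24001 on 203.0.113.5 to this host could announce exactly that address:
#
#   AnnounceAddresses = ["/ip4/203.0.113.5/tcp/24001"]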
# Addresses to not announce
# Format: multiaddress
#
# type: []string
# env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES
#NoAnnounceAddresses = []
# When not disabled (default), lotus asks NAT devices (e.g., routers) to
# open up an external port and forward it to the port lotus is running on.
# When this works (i.e., when your router supports NAT port forwarding),
# it makes the local lotus node accessible from the public internet
#
# type: bool
# env var: LOTUS_LIBP2P_DISABLENATPORTMAP
#DisableNatPortMap = false
# ConnMgrLow is the number of connections that the basic connection manager
# will trim down to.
#
# type: uint
# env var: LOTUS_LIBP2P_CONNMGRLOW
#ConnMgrLow = 150
# ConnMgrHigh is the number of connections that, when exceeded, will trigger
# a connection GC operation. Note: protected/recently formed connections don't
# count towards this limit.
#
# type: uint
# env var: LOTUS_LIBP2P_CONNMGRHIGH
#ConnMgrHigh = 180
# ConnMgrGrace is a time duration that new connections are immune from being
# closed by the connection manager.
#
# type: Duration
# env var: LOTUS_LIBP2P_CONNMGRGRACE
#ConnMgrGrace = "20s"
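# Sketch of a possible tuning (a judgment call, not a recommendation): a well-connected
# node with spare memory and bandwidth could raise both watermarks together, e.g.
#
#   ConnMgrLow = 300
#   ConnMgrHigh = 360
#   ConnMgrGrace = "20s"
#
# ConnMgrHigh should stay above ConnMgrLow so the connection manager has room to trim.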
[Pubsub]
# Run the node in bootstrap-node mode
#
# type: bool
# env var: LOTUS_PUBSUB_BOOTSTRAPPER
#Bootstrapper = false
# type: string
# env var: LOTUS_PUBSUB_REMOTETRACER
#RemoteTracer = ""
# Path to the file that will be used to output tracer content in JSON format.
# If present, the tracer will save data to the defined file.
# Format: file path
#
# type: string
# env var: LOTUS_PUBSUB_JSONTRACER
#JsonTracer = ""
# Connection string for the Elasticsearch instance.
# If present, the tracer will save data to Elasticsearch.
# Format: https://<username>:<password>@<elasticsearch_url>:<port>/
#
# type: string
# env var: LOTUS_PUBSUB_ELASTICSEARCHTRACER
#ElasticSearchTracer = ""
# Name of the Elasticsearch index that will be used to save tracer data.
# This property is used only if the ElasticSearchTracer property is set.
#
# type: string
# env var: LOTUS_PUBSUB_ELASTICSEARCHINDEX
#ElasticSearchIndex = ""
# Auth token that will be passed with logs to Elasticsearch - used for weighted peer scoring.
#
# type: string
# env var: LOTUS_PUBSUB_TRACERSOURCEAUTH
#TracerSourceAuth = ""
[Subsystems]
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLEMINING
#EnableMining = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESEALING
#EnableSealing = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESECTORSTORAGE
#EnableSectorStorage = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS
#EnableMarkets = false
# type: string
# env var: LOTUS_SUBSYSTEMS_SEALERAPIINFO
#SealerApiInfo = ""
# type: string
# env var: LOTUS_SUBSYSTEMS_SECTORINDEXAPIINFO
#SectorIndexApiInfo = ""
[Dealmaking]
# When enabled, the miner can accept online deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERONLINESTORAGEDEALS
#ConsiderOnlineStorageDeals = true
# When enabled, the miner can accept offline deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINESTORAGEDEALS
#ConsiderOfflineStorageDeals = true
# When enabled, the miner can accept retrieval deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERONLINERETRIEVALDEALS
#ConsiderOnlineRetrievalDeals = true
# When enabled, the miner can accept offline retrieval deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINERETRIEVALDEALS
#ConsiderOfflineRetrievalDeals = true
# When enabled, the miner can accept verified deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERVERIFIEDSTORAGEDEALS
#ConsiderVerifiedStorageDeals = true
# When enabled, the miner can accept unverified deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERUNVERIFIEDSTORAGEDEALS
#ConsiderUnverifiedStorageDeals = true
# A list of Data CIDs to reject when making deals
#
# type: []cid.Cid
# env var: LOTUS_DEALMAKING_PIECECIDBLOCKLIST
#PieceCidBlocklist = []
# Maximum expected amount of time getting the deal into a sealed sector will take
# This includes the time the deal will need to get transferred and published
# before being assigned to a sector
#
# type: Duration
# env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION
#ExpectedSealDuration = "24h0m0s"
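# At mainnet's 30-second epochs the default of 24h corresponds to 2880 epochs; roughly
# speaking, deals whose proposed StartEpoch is closer than this are rejected because
# they could not be sealed in time.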
# Maximum amount of time proposed deal StartEpoch can be in the future
#
# type: Duration
# env var: LOTUS_DEALMAKING_MAXDEALSTARTDELAY
#MaxDealStartDelay = "336h0m0s"
# When a deal is ready to publish, the amount of time to wait for more
# deals to be ready to publish before publishing them all as a batch
#
# type: Duration
# env var: LOTUS_DEALMAKING_PUBLISHMSGPERIOD
#PublishMsgPeriod = "1h0m0s"
# The maximum number of deals to include in a single PublishStorageDeals
# message
#
# type: uint64
# env var: LOTUS_DEALMAKING_MAXDEALSPERPUBLISHMSG
#MaxDealsPerPublishMsg = 8
# The maximum collateral that the provider will put up against a deal,
# as a multiplier of the minimum collateral bound
#
# type: uint64
# env var: LOTUS_DEALMAKING_MAXPROVIDERCOLLATERALMULTIPLIER
#MaxProviderCollateralMultiplier = 2
# The maximum allowed disk usage size in bytes of staging deals not yet
# passed to the sealing node by the markets service. 0 is unlimited.
#
# type: int64
# env var: LOTUS_DEALMAKING_MAXSTAGINGDEALSBYTES
#MaxStagingDealsBytes = 0
# The maximum number of parallel online data transfers for storage deals
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE
#SimultaneousTransfersForStorage = 20
# The maximum number of simultaneous data transfers from any single client
# for storage deals.
# Unset by default (0), and values higher than SimultaneousTransfersForStorage
# will have no effect; i.e. the total number of simultaneous data transfers
# across all storage clients is bound by SimultaneousTransfersForStorage
# regardless of this number.
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT
#SimultaneousTransfersForStoragePerClient = 0
# The maximum number of parallel online data transfers for retrieval deals
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORRETRIEVAL
#SimultaneousTransfersForRetrieval = 20
# Minimum start epoch buffer to give time for sealing of sector with deal.
#
# type: uint64
# env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER
#StartEpochSealingBuffer = 480
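# At mainnet's 30-second epochs the default of 480 epochs is a 4-hour buffer.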
# A command used for fine-grained evaluation of storage deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_FILTER
#Filter = ""
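# Minimal sketch (hypothetical script path): the filter command receives the deal
# proposal as JSON on stdin and accepts or rejects it via its exit code, as described
# at the link above, so a basic setup could be
#
#   Filter = "/absolute/path/to/deal-filter.sh"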
# A command used for fine-grained evaluation of retrieval deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_RETRIEVALFILTER
#RetrievalFilter = ""
[Dealmaking.RetrievalPricing]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_STRATEGY
#Strategy = "default"
[Dealmaking.RetrievalPricing.Default]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_DEFAULT_VERIFIEDDEALSFREETRANSFER
#VerifiedDealsFreeTransfer = true
[Dealmaking.RetrievalPricing.External]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_EXTERNAL_PATH
#Path = ""
[IndexProvider]
# Enable sets whether to enable indexing announcements to the network and expose endpoints that
# allow indexer nodes to process announcements. Enabled by default.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_ENABLE
#Enable = true
# EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
# entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
# maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
# the length of multihashes being advertised. For example, advertising 128-bit multihashes
# with the default EntriesCacheCapacity and EntriesChunkSize means the cache size can grow to
# 256MiB when full.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCACHECAPACITY
#EntriesCacheCapacity = 1024
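# The 256MiB figure follows from the defaults: 1024 cached chunks x 16384 multihashes
# per chunk x 16 bytes per 128-bit multihash = 268,435,456 bytes = 256MiB.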
# EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
# Defaults to 16384 if not specified. Note that chunks are chained together for indexing
# advertisements that include more multihashes than the configured EntriesChunkSize.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCHUNKSIZE
#EntriesChunkSize = 16384
# TopicName sets the topic name on which the changes to the advertised content are announced.
# If not explicitly specified, the topic name is automatically inferred from the network name
# in the following format: '/indexer/ingest/<network-name>'
# Defaults to empty, which implies the topic name is inferred from the network name.
#
# type: string
# env var: LOTUS_INDEXPROVIDER_TOPICNAME
#TopicName = ""
# PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
# starts. By default, the cache is rehydrated from previously cached entries stored in
# datastore if any is present.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_PURGECACHEONSTART
#PurgeCacheOnStart = false
[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
# env var: LOTUS_PROVING_PARALLELCHECKLIMIT
#ParallelCheckLimit = 32
# Maximum amount of time a proving pre-check can take for a sector. If the check times out, the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
# env var: LOTUS_PROVING_SINGLECHECKTIMEOUT
#SingleCheckTimeout = "10m0s"
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
# env var: LOTUS_PROVING_PARTITIONCHECKTIMEOUT
#PartitionCheckTimeout = "20m0s"
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINDOWPOST
#DisableBuiltinWindowPoSt = false
# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINNINGPOST
#DisableBuiltinWinningPoSt = false
# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
# the builtin logic not skipping snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant - with PoSt workers, challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
# sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS
#DisableWDPoStPreChecks = false
# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
#
# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
# means that a single message can prove at most 10 partitions
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
# env var: LOTUS_PROVING_MAXPARTITIONSPERPOSTMESSAGE
#MaxPartitionsPerPoStMessage = 0
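# The two limits are consistent: 10 partitions x 2349 sectors = 23,490 sectors, which
# fits under the 25,000-sector cap, while an 11th partition could exceed it.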
# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (e.g. 1);
# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have lower gas limit)
#
# type: int
# env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
#MaxPartitionsPerRecoveryMessage = 0
# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
# with recovering sectors in the post message
#
# Note that enabling this option may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# type: bool
# env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
#SingleRecoveringPartitionPerPostMessage = false
[Sealing]
# Upper bound on how many sectors can be waiting for more deals to be packed into them before they begin sealing at any given time.
# If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created.
# If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel
# Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency
# 0 = no limit
#
# type: uint64
# env var: LOTUS_SEALING_MAXWAITDEALSSECTORS
#MaxWaitDealsSectors = 2
# Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)
#
# type: uint64
# env var: LOTUS_SEALING_MAXSEALINGSECTORS
#MaxSealingSectors = 0
# Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)
#
# type: uint64
# env var: LOTUS_SEALING_MAXSEALINGSECTORSFORDEALS
#MaxSealingSectorsForDeals = 0
# Prefer creating new sectors even if there are sectors Available for upgrading.
# This setting combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals makes it
# possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing
# flow when the volume of storage deals is lower.
#
# type: bool
# env var: LOTUS_SEALING_PREFERNEWSECTORSFORDEALS
#PreferNewSectorsForDeals = false
# Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)
#
# type: uint64
# env var: LOTUS_SEALING_MAXUPGRADINGSECTORS
#MaxUpgradingSectors = 0
# When set to a non-zero value, minimum number of epochs until sector expiration required for sectors to be considered
# for upgrades (0 = DealMinDuration = 180 days = 518400 epochs)
#
# Note that if all deals waiting in the input queue have lifetimes longer than this value, upgrade sectors will be
# required to have expiration of at least the soonest-ending deal
#
# type: uint64
# env var: LOTUS_SEALING_MINUPGRADESECTOREXPIRATION
#MinUpgradeSectorExpiration = 0
# DEPRECATED: Target expiration is no longer used
#
# type: uint64
# env var: LOTUS_SEALING_MINTARGETUPGRADESECTOREXPIRATION
#MinTargetUpgradeSectorExpiration = 0
# CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
# live before it must be extended or converted into a sector containing deals, or else be
# terminated. Value must be between 180-540 days inclusive
#
# type: Duration
# env var: LOTUS_SEALING_COMMITTEDCAPACITYSECTORLIFETIME
#CommittedCapacitySectorLifetime = "12960h0m0s"
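# The default of 12960h equals 540 days (540 x 24h), i.e. the upper end of the allowed range.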
# Period of time that a newly created sector will wait for more deals to be packed into it before it starts to seal.
# Sectors which are fully filled will start sealing immediately
#
# type: Duration
# env var: LOTUS_SEALING_WAITDEALSDELAY
#WaitDealsDelay = "6h0m0s"
# Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
# avoid the relatively high cost of unsealing the data later, at the cost of more storage space
#
# type: bool
# env var: LOTUS_SEALING_ALWAYSKEEPUNSEALEDCOPY
#AlwaysKeepUnsealedCopy = true
# Run sector finalization before submitting sector proof to the chain
#
# type: bool
# env var: LOTUS_SEALING_FINALIZEEARLY
#FinalizeEarly = false
# Whether new sectors are created to pack incoming deals
# When this is set to false no new sectors will be created for sealing incoming deals
# This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade
#
# type: bool
# env var: LOTUS_SEALING_MAKENEWSECTORFORDEALS
#MakeNewSectorForDeals = true
# After sealing CC sectors, make them available for upgrading with deals
#
# type: bool
# env var: LOTUS_SEALING_MAKECCSECTORSAVAILABLE
#MakeCCSectorsAvailable = false
# Whether to use available miner balance for sector collateral instead of sending it with each message
#
# type: bool
# env var: LOTUS_SEALING_COLLATERALFROMMINERBALANCE
#CollateralFromMinerBalance = false
# Minimum available balance to keep in the miner actor before sending it with messages
#
# type: types.FIL
# env var: LOTUS_SEALING_AVAILABLEBALANCEBUFFER
#AvailableBalanceBuffer = "0 FIL"
# Don't send collateral with messages even if there is no available balance in the miner actor
#
# type: bool
# env var: LOTUS_SEALING_DISABLECOLLATERALFALLBACK
#DisableCollateralFallback = false
# enable / disable precommit batching (takes effect after nv13)
#
# type: bool
# env var: LOTUS_SEALING_BATCHPRECOMMITS
#BatchPreCommits = true
# maximum precommit batch size - batches will be sent immediately above this size
#
# type: int
# env var: LOTUS_SEALING_MAXPRECOMMITBATCH
#MaxPreCommitBatch = 256
# how long to wait before submitting a batch after crossing the minimum batch size
#
# type: Duration
# env var: LOTUS_SEALING_PRECOMMITBATCHWAIT
#PreCommitBatchWait = "24h0m0s"
# time buffer for forceful batch submission before sectors/deals in batch would start expiring
#
# type: Duration
# env var: LOTUS_SEALING_PRECOMMITBATCHSLACK
#PreCommitBatchSlack = "3h0m0s"
# enable / disable commit aggregation (takes effect after nv13)
#
# type: bool
# env var: LOTUS_SEALING_AGGREGATECOMMITS
#AggregateCommits = true
# minimum batched commit size - batches above this size will eventually be sent on a timeout
#
# type: int
# env var: LOTUS_SEALING_MINCOMMITBATCH
#MinCommitBatch = 4
# maximum batched commit size - batches will be sent immediately above this size
#
# type: int
# env var: LOTUS_SEALING_MAXCOMMITBATCH
#MaxCommitBatch = 819
# how long to wait before submitting a batch after crossing the minimum batch size
#
# type: Duration
# env var: LOTUS_SEALING_COMMITBATCHWAIT
#CommitBatchWait = "24h0m0s"
# time buffer for forceful batch submission before sectors/deals in batch would start expiring
#
# type: Duration
# env var: LOTUS_SEALING_COMMITBATCHSLACK
#CommitBatchSlack = "1h0m0s"
# network BaseFee below which to stop doing precommit batching, instead
# sending precommit messages to the chain individually
#
# type: types.FIL
# env var: LOTUS_SEALING_BATCHPRECOMMITABOVEBASEFEE
#BatchPreCommitAboveBaseFee = "0.00000000032 FIL"
# network BaseFee below which to stop doing commit aggregation, instead
# submitting proofs to the chain individually
#
# type: types.FIL
# env var: LOTUS_SEALING_AGGREGATEABOVEBASEFEE
#AggregateAboveBaseFee = "0.00000000032 FIL"
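# 0.00000000032 FIL is 320,000,000 attoFIL (0.32 nanoFIL) per gas unit. The idea is that
# below this base fee individual messages are cheap enough that the overhead of
# aggregation/batching is no longer worth it.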
# When submitting several sector prove commit messages simultaneously, this option allows you to
# stagger the number of prove commits submitted per epoch
# This is done because gas estimates for ProveCommits are non-deterministic and increase as a large
# number of sectors get committed within the same epoch, resulting in occasionally failed msgs.
# Submitting a smaller number of prove commits per epoch would reduce the possibility of failed msgs
#
# type: uint64
# env var: LOTUS_SEALING_MAXSECTORPROVECOMMITSSUBMITTEDPEREPOCH
#MaxSectorProveCommitsSubmittedPerEpoch = 20
# type: uint64
# env var: LOTUS_SEALING_TERMINATEBATCHMAX
#TerminateBatchMax = 100
# type: uint64
# env var: LOTUS_SEALING_TERMINATEBATCHMIN
#TerminateBatchMin = 1
# type: Duration
# env var: LOTUS_SEALING_TERMINATEBATCHWAIT
#TerminateBatchWait = "5m0s"
# SealWithSyntheticPoRep will reduce data holdings after PC1 by storing the precomputed responses
# to any challenge. This proof's PC1 step uses a cheaper-to-compute algorithm for the responses,
# but still must do more computation during PC1 in order to create this oracle.
#
# type: bool
# env var: LOTUS_SEALING_SEALWITHSYNTHETICPOREP
#SealWithSyntheticPoRep = false
[Storage]
# type: int
# env var: LOTUS_STORAGE_PARALLELFETCHLIMIT
#ParallelFetchLimit = 10
# type: bool
# env var: LOTUS_STORAGE_ALLOWSECTORDOWNLOAD
#AllowSectorDownload = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWADDPIECE
#AllowAddPiece = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPRECOMMIT1
#AllowPreCommit1 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPRECOMMIT2
#AllowPreCommit2 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWCOMMIT
#AllowCommit = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWUNSEAL
#AllowUnseal = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE
#AllowReplicaUpdate = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2
#AllowProveReplicaUpdate2 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
#AllowRegenSectorKey = true
# LocalWorkerName specifies a custom name for the builtin worker.
# If set to an empty string (default), the OS hostname will be used
#
# type: string
# env var: LOTUS_STORAGE_LOCALWORKERNAME
#LocalWorkerName = ""
# Assigner specifies the worker assigner to use when scheduling tasks.
# "utilization" (default) - assign tasks to workers with lowest utilization.
# "spread" - assign tasks to as many distinct workers as possible.
#
# type: string
# env var: LOTUS_STORAGE_ASSIGNER
#Assigner = "utilization"
# DisallowRemoteFinalize when set to true will force all Finalize tasks to
# run on workers with local access to both long-term storage and the sealing
# path containing the sector.
# --
# WARNING: Only set this if all workers have access to long-term storage
# paths. If this flag is enabled, and there are workers without long-term
# storage access, sectors will not be moved from them, and Finalize tasks
# will appear to be stuck.
# --
# If you see stuck Finalize tasks after enabling this setting, check
# 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
#
# type: bool
# env var: LOTUS_STORAGE_DISALLOWREMOTEFINALIZE
#DisallowRemoteFinalize = false
# ResourceFiltering instructs the system which resource filtering strategy
# to use when evaluating tasks against this worker. An empty value defaults
# to "hardware".
#
# type: ResourceFilteringStrategy
# env var: LOTUS_STORAGE_RESOURCEFILTERING
#ResourceFiltering = "hardware"
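# "hardware" filters tasks by the resources actually present on this machine. Setting this
# to "disabled" (if supported by your lotus version) skips resource filtering so tasks are
# scheduled regardless of detected RAM/GPU, e.g.
#
#   ResourceFiltering = "disabled"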
[Fees]
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITGASFEE
#MaxPreCommitGasFee = "0.025 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITGASFEE
#MaxCommitGasFee = "0.05 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXTERMINATEGASFEE
#MaxTerminateGasFee = "0.5 FIL"
# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
# env var: LOTUS_FEES_MAXWINDOWPOSTGASFEE
#MaxWindowPoStGasFee = "5 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXPUBLISHDEALSFEE
#MaxPublishDealsFee = "0.05 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXMARKETBALANCEADDFEE
#MaxMarketBalanceAddFee = "0.007 FIL"
[Fees.MaxPreCommitBatchGasFee]
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_BASE
#Base = "0 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_PERSECTOR
#PerSector = "0.02 FIL"
[Fees.MaxCommitBatchGasFee]
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_BASE
#Base = "0 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_PERSECTOR
#PerSector = "0.03 FIL"
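# Assuming the cap scales as Base + PerSector x (sectors in the batch), a full aggregate of
# 819 sectors would be capped at roughly 0 FIL + 819 x 0.03 FIL = 24.57 FIL.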
[Addresses]
# Addresses to send PreCommit messages from
#
# type: []string
# env var: LOTUS_ADDRESSES_PRECOMMITCONTROL
#PreCommitControl = []
# Addresses to send Commit messages from
#
# type: []string
# env var: LOTUS_ADDRESSES_COMMITCONTROL
#CommitControl = []
# type: []string
# env var: LOTUS_ADDRESSES_TERMINATECONTROL
#TerminateControl = []
# type: []string
# env var: LOTUS_ADDRESSES_DEALPUBLISHCONTROL
#DealPublishControl = []
# DisableOwnerFallback disables usage of the owner address for messages
# sent automatically
#
# type: bool
# env var: LOTUS_ADDRESSES_DISABLEOWNERFALLBACK
#DisableOwnerFallback = false
# DisableWorkerFallback disables usage of the worker address for messages
# sent automatically, if control addresses are configured.
# A control address that doesn't have enough funds will still be chosen
# over the worker address if this flag is set.
#
# type: bool
# env var: LOTUS_ADDRESSES_DISABLEWORKERFALLBACK
#DisableWorkerFallback = false
[DAGStore]
# Path to the dagstore root directory. This directory contains three
# subdirectories, which can be symlinked to alternative locations if
# need be:
# - ./transients: caches unsealed deals that have been fetched from the
# storage subsystem for serving retrievals.
# - ./indices: stores shard indices.
# - ./datastore: holds the KV store tracking the state of every shard
# known to the DAG store.
# Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
# <LOTUS_MINER_PATH>/dagstore (monolith deployment)
#
# type: string
# env var: LOTUS_DAGSTORE_ROOTDIR
#RootDir = ""
# The maximum amount of indexing jobs that can run simultaneously.
# 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTINDEX
#MaxConcurrentIndex = 5
# The maximum amount of unsealed deals that can be fetched simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 0 (unlimited).
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES
#MaxConcurrentReadyFetches = 0
# The maximum amount of unseals that can be processed simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS
#MaxConcurrentUnseals = 5
# The maximum number of simultaneous inflight API calls to the storage
# subsystem.
# Default value: 100.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENCYSTORAGECALLS
#MaxConcurrencyStorageCalls = 100
# The time between calls to periodic dagstore GC, in time.Duration string
# representation, e.g. 1m, 5m, 1h.
# Default value: 1 minute.
#
# type: Duration
# env var: LOTUS_DAGSTORE_GCINTERVAL
#GCInterval = "1m0s"