[API]
  # Binding address for the Lotus API
  #
  # type: string
  # env var: LOTUS_API_LISTENADDRESS
  #ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"

  # type: string
  # env var: LOTUS_API_REMOTELISTENADDRESS
  #RemoteListenAddress = "127.0.0.1:2345"

  # type: Duration
  # env var: LOTUS_API_TIMEOUT
  #Timeout = "30s"

[Backup]
  # When set to true disables metadata log (.lotus/kvlog). This can save disk
  # space by reducing metadata redundancy.
  #
  # Note that in case of metadata corruption it might be much harder to recover
  # your node if metadata log is disabled.
  #
  # type: bool
  # env var: LOTUS_BACKUP_DISABLEMETADATALOG
  #DisableMetadataLog = true

[Logging]
  [Logging.SubsystemLevels]
    # env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM
    #example-subsystem = "INFO"

[Libp2p]
  # Binding address for the libp2p host - 0 means random port.
  # Format: multiaddress; see https://multiformats.io/multiaddr/
  #
  # type: []string
  # env var: LOTUS_LIBP2P_LISTENADDRESSES
  #ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/udp/0/quic-v1/webtransport"]

  # Addresses to explicitly announce to other peers. If not specified,
  # all interface addresses are announced.
  # Format: multiaddress
  #
  # type: []string
  # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES
  #AnnounceAddresses = []

  # Addresses to not announce
  # Format: multiaddress
  #
  # type: []string
  # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES
  #NoAnnounceAddresses = []

  # When not disabled (default), lotus asks NAT devices (e.g., routers) to
  # open up an external port and forward it to the port lotus is running on.
  # When this works (i.e., when your router supports NAT port forwarding),
  # it makes the local lotus node accessible from the public internet.
  #
  # type: bool
  # env var: LOTUS_LIBP2P_DISABLENATPORTMAP
  #DisableNatPortMap = false

  # ConnMgrLow is the number of connections that the basic connection manager
  # will trim down to.
  #
  # type: uint
  # env var: LOTUS_LIBP2P_CONNMGRLOW
  #ConnMgrLow = 150

  # ConnMgrHigh is the number of connections that, when exceeded, will trigger
  # a connection GC operation. Note: protected/recently formed connections don't
  # count towards this limit.
  #
  # type: uint
  # env var: LOTUS_LIBP2P_CONNMGRHIGH
  #ConnMgrHigh = 180

  # ConnMgrGrace is a time duration that new connections are immune from being
  # closed by the connection manager.
  #
  # type: Duration
  # env var: LOTUS_LIBP2P_CONNMGRGRACE
  #ConnMgrGrace = "20s"

[Pubsub]
  # Run the node in bootstrap-node mode
  #
  # type: bool
  # env var: LOTUS_PUBSUB_BOOTSTRAPPER
  #Bootstrapper = false

  # type: string
  # env var: LOTUS_PUBSUB_REMOTETRACER
  #RemoteTracer = ""

  # Path to the file that will be used to output tracer content in JSON format.
  # If present, the tracer will save data to the defined file.
  # Format: file path
  #
  # type: string
  # env var: LOTUS_PUBSUB_JSONTRACER
  #JsonTracer = ""

  # Connection string for an elasticsearch instance.
  # If present, the tracer will save data to elasticsearch.
  # Format: https://<username>:<password>@<host>:<port>/
  #
  # type: string
  # env var: LOTUS_PUBSUB_ELASTICSEARCHTRACER
  #ElasticSearchTracer = ""

  # Name of the elasticsearch index that will be used to save tracer data.
  # This property is used only if the ElasticSearchTracer property is set.
  #
  # type: string
  # env var: LOTUS_PUBSUB_ELASTICSEARCHINDEX
  #ElasticSearchIndex = ""
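
  # Illustrative example (not part of the defaults): pointing the tracer at a
  # hypothetical elasticsearch instance. The host, credentials, and index name
  # below are made-up placeholder values.
  #ElasticSearchTracer = "https://user:password@es.example.com:9200/"
  #ElasticSearchIndex = "lotus-pubsub-traces"
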
  # Auth token that will be passed with logs to elasticsearch - used for weighted peers score.
  #
  # type: string
  # env var: LOTUS_PUBSUB_TRACERSOURCEAUTH
  #TracerSourceAuth = ""

[Subsystems]
  # type: bool
  # env var: LOTUS_SUBSYSTEMS_ENABLEMINING
  #EnableMining = true

  # type: bool
  # env var: LOTUS_SUBSYSTEMS_ENABLESEALING
  #EnableSealing = true

  # type: bool
  # env var: LOTUS_SUBSYSTEMS_ENABLESECTORSTORAGE
  #EnableSectorStorage = true

  # type: bool
  # env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS
  #EnableMarkets = false

  # When enabled, the sector index will reside in an external database
  # as opposed to the local KV store in the miner process.
  # This is useful to allow workers to bypass the lotus miner to access sector information.
  #
  # type: bool
  # env var: LOTUS_SUBSYSTEMS_ENABLESECTORINDEXDB
  #EnableSectorIndexDB = false

  # type: string
  # env var: LOTUS_SUBSYSTEMS_SEALERAPIINFO
  #SealerApiInfo = ""

  # type: string
  # env var: LOTUS_SUBSYSTEMS_SECTORINDEXAPIINFO
  #SectorIndexApiInfo = ""

[Dealmaking]
  # When enabled, the miner can accept online deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDERONLINESTORAGEDEALS
  #ConsiderOnlineStorageDeals = true

  # When enabled, the miner can accept offline deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDEROFFLINESTORAGEDEALS
  #ConsiderOfflineStorageDeals = true

  # When enabled, the miner can accept retrieval deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDERONLINERETRIEVALDEALS
  #ConsiderOnlineRetrievalDeals = true

  # When enabled, the miner can accept offline retrieval deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDEROFFLINERETRIEVALDEALS
  #ConsiderOfflineRetrievalDeals = true

  # When enabled, the miner can accept verified deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDERVERIFIEDSTORAGEDEALS
  #ConsiderVerifiedStorageDeals = true

  # When enabled, the miner can accept unverified deals
  #
  # type: bool
  # env var: LOTUS_DEALMAKING_CONSIDERUNVERIFIEDSTORAGEDEALS
  #ConsiderUnverifiedStorageDeals = true

  # A list of Data CIDs to reject when making deals
  #
  # type: []cid.Cid
  # env var: LOTUS_DEALMAKING_PIECECIDBLOCKLIST
  #PieceCidBlocklist = []

  # Maximum expected amount of time getting the deal into a sealed sector will take.
  # This includes the time the deal will need to get transferred and published
  # before being assigned to a sector.
  #
  # type: Duration
  # env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION
  #ExpectedSealDuration = "24h0m0s"

  # Maximum amount of time the proposed deal StartEpoch can be in the future
  #
  # type: Duration
  # env var: LOTUS_DEALMAKING_MAXDEALSTARTDELAY
  #MaxDealStartDelay = "336h0m0s"

  # When a deal is ready to publish, the amount of time to wait for more
  # deals to be ready to publish before publishing them all as a batch
  #
  # type: Duration
  # env var: LOTUS_DEALMAKING_PUBLISHMSGPERIOD
  #PublishMsgPeriod = "1h0m0s"

  # The maximum number of deals to include in a single PublishStorageDeals
  # message
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_MAXDEALSPERPUBLISHMSG
  #MaxDealsPerPublishMsg = 8

  # The maximum collateral that the provider will put up against a deal,
  # as a multiplier of the minimum collateral bound
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_MAXPROVIDERCOLLATERALMULTIPLIER
  #MaxProviderCollateralMultiplier = 2
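
  # Illustrative example (not part of the defaults): publish incoming deals
  # sooner by shrinking the batching window. Smaller batches publish faster but
  # spend more total gas across more PublishStorageDeals messages.
  #PublishMsgPeriod = "10m0s"
  #MaxDealsPerPublishMsg = 4
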
  # The maximum allowed disk usage size in bytes of staging deals not yet
  # passed to the sealing node by the markets service. 0 is unlimited.
  #
  # type: int64
  # env var: LOTUS_DEALMAKING_MAXSTAGINGDEALSBYTES
  #MaxStagingDealsBytes = 0

  # The maximum number of parallel online data transfers for storage deals
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE
  #SimultaneousTransfersForStorage = 20

  # The maximum number of simultaneous data transfers from any single client
  # for storage deals.
  # Unset by default (0), and values higher than SimultaneousTransfersForStorage
  # will have no effect; i.e. the total number of simultaneous data transfers
  # across all storage clients is bound by SimultaneousTransfersForStorage
  # regardless of this number.
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT
  #SimultaneousTransfersForStoragePerClient = 0

  # The maximum number of parallel online data transfers for retrieval deals
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORRETRIEVAL
  #SimultaneousTransfersForRetrieval = 20

  # Minimum start epoch buffer to give time for sealing of the sector with the deal
  #
  # type: uint64
  # env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER
  #StartEpochSealingBuffer = 480

  # A command used for fine-grained evaluation of storage deals;
  # see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
  #
  # type: string
  # env var: LOTUS_DEALMAKING_FILTER
  #Filter = ""

  # A command used for fine-grained evaluation of retrieval deals;
  # see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
  #
  # type: string
  # env var: LOTUS_DEALMAKING_RETRIEVALFILTER
  #RetrievalFilter = ""

  [Dealmaking.RetrievalPricing]
    # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_STRATEGY
    #Strategy = "default"

    [Dealmaking.RetrievalPricing.Default]
      # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_DEFAULT_VERIFIEDDEALSFREETRANSFER
      #VerifiedDealsFreeTransfer = true

    [Dealmaking.RetrievalPricing.External]
      # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_EXTERNAL_PATH
      #Path = ""

[IndexProvider]
  # Enable sets whether to enable indexing announcement to the network and expose endpoints that
  # allow indexer nodes to process announcements. Enabled by default.
  #
  # type: bool
  # env var: LOTUS_INDEXPROVIDER_ENABLE
  #Enable = true

  # EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
  # entries. Defaults to 1024 if not specified. The cache is evicted using an LRU policy. The
  # maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
  # the length of multihashes being advertised. For example, advertising 128-bit long multihashes
  # with the default EntriesCacheCapacity and EntriesChunkSize means the cache size can grow to
  # 256MiB when full.
  #
  # type: int
  # env var: LOTUS_INDEXPROVIDER_ENTRIESCACHECAPACITY
  #EntriesCacheCapacity = 1024

  # EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
  # Defaults to 16384 if not specified. Note that chunks are chained together for indexing
  # advertisements that include more multihashes than the configured EntriesChunkSize.
  #
  # type: int
  # env var: LOTUS_INDEXPROVIDER_ENTRIESCHUNKSIZE
  #EntriesChunkSize = 16384
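
  # Worked example of the cache bound described above: 1024 cached chunks
  # (EntriesCacheCapacity) x 16384 multihashes per chunk (EntriesChunkSize)
  # x 16 bytes per 128-bit multihash = 268,435,456 bytes = 256MiB.
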
  # TopicName sets the topic name on which the changes to the advertised content are announced.
  # If not explicitly specified, the topic name is automatically inferred from the network name
  # in the following format: '/indexer/ingest/<network-name>'
  # Defaults to empty, which implies the topic name is inferred from the network name.
  #
  # type: string
  # env var: LOTUS_INDEXPROVIDER_TOPICNAME
  #TopicName = ""

  # PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
  # starts. By default, the cache is rehydrated from previously cached entries stored in the
  # datastore if any is present.
  #
  # type: bool
  # env var: LOTUS_INDEXPROVIDER_PURGECACHEONSTART
  #PurgeCacheOnStart = false

[Proving]
  # Maximum number of sector checks to run in parallel. (0 = unlimited)
  #
  # WARNING: Setting this value too high may make the node crash by running out of stack
  # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
  # to late submission.
  #
  # After changing this option, confirm that the new value works in your setup by invoking
  # 'lotus-miner proving compute window-post 0'
  #
  # type: int
  # env var: LOTUS_PROVING_PARALLELCHECKLIMIT
  #ParallelCheckLimit = 32

  # Maximum amount of time a proving pre-check can take for a sector. If the check times out, the sector will be skipped.
  #
  # WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
  # test challenge took longer than this timeout
  # WARNING: Setting this value too high risks missing the PoSt deadline in case IO operations related to this sector are
  # blocked (e.g. in case of a disconnected NFS mount)
  #
  # type: Duration
  # env var: LOTUS_PROVING_SINGLECHECKTIMEOUT
  #SingleCheckTimeout = "10m0s"

  # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
  # the partition which didn't get checked on time will be skipped.
  #
  # WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
  # test challenge took longer than this timeout
  # WARNING: Setting this value too high risks missing the PoSt deadline in case IO operations related to this partition are
  # blocked or slow
  #
  # type: Duration
  # env var: LOTUS_PROVING_PARTITIONCHECKTIMEOUT
  #PartitionCheckTimeout = "20m0s"

  # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
  #
  # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL, resulting in faulty sectors which will need
  # to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
  #
  # After changing this option, confirm that the new value works in your setup by invoking
  # 'lotus-miner proving compute window-post 0'
  #
  # type: bool
  # env var: LOTUS_PROVING_DISABLEBUILTINWINDOWPOST
  #DisableBuiltinWindowPoSt = false

  # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
  #
  # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL, resulting in lost block rewards.
  # Before enabling this option, make sure your PoSt workers work correctly.
  #
  # type: bool
  # env var: LOTUS_PROVING_DISABLEBUILTINWINNINGPOST
  #DisableBuiltinWinningPoSt = false
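
  # Illustrative example (not part of the defaults): a setup where dedicated
  # PoSt workers are assumed to be attached and handle all proof computation,
  # so the miner process never computes proofs itself. Only enable this after
  # verifying the workers with 'lotus-miner proving compute window-post 0'.
  #DisableBuiltinWindowPoSt = true
  #DisableBuiltinWinningPoSt = true
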
  # Disable WindowPoSt provable sector readability checks.
  #
  # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
  # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
  # we're only interested in checking that sector data can be read.
  #
  # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt set to false), this process
  # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
  # the builtin logic not skipping snark computation when some sectors need to be skipped.
  #
  # When using PoSt workers, this process is mostly redundant; with PoSt workers, challenges will be read once, and
  # if challenges for some sectors aren't readable, those sectors will just get skipped.
  #
  # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in a shorter
  # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
  # be negligible.
  #
  # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
  #
  # NOTE: Even when this option is enabled, recovering sectors will be checked before the recovery declaration message is
  # sent to the chain.
  #
  # After changing this option, confirm that the new value works in your setup by invoking
  # 'lotus-miner proving compute window-post 0'
  #
  # type: bool
  # env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS
  #DisableWDPoStPreChecks = false

  # Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
  #
  # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
  #
  # Note that setting this value lower may result in less efficient gas use - more messages will be sent
  # to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit).
  #
  # Setting this value above the network limit has no effect.
  #
  # type: int
  # env var: LOTUS_PROVING_MAXPARTITIONSPERPOSTMESSAGE
  #MaxPartitionsPerPoStMessage = 0

  # In some cases when submitting DeclareFaultsRecovered messages,
  # there may be too many recoveries to fit in a BlockGasLimit.
  # In those cases it may be necessary to set this value to something low (eg 1).
  # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
  # resulting in more total gas use (but each message will have a lower gas limit).
  #
  # type: int
  # env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
  #MaxPartitionsPerRecoveryMessage = 0

  # Enable a single partition per PoSt message for partitions containing recovery sectors.
  #
  # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
  # too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
  # with recovering sectors in the PoSt message.
  #
  # Note that setting this value lower may result in less efficient gas use - more messages will be sent
  # to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit).
  #
  # type: bool
  # env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
  #SingleRecoveringPartitionPerPostMessage = false
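
  # Illustrative example (not part of the defaults): conservative gas behavior
  # during a large fault-recovery event - one partition per recovery message,
  # and recovering partitions proven in their own PoSt messages.
  #MaxPartitionsPerRecoveryMessage = 1
  #SingleRecoveringPartitionPerPostMessage = true
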
[Sealing]
  # Upper bound on how many sectors can be waiting for more deals to be packed into them before they begin sealing at any given time.
  # If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created.
  # If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel.
  # Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency.
  # 0 = no limit
  #
  # type: uint64
  # env var: LOTUS_SEALING_MAXWAITDEALSSECTORS
  #MaxWaitDealsSectors = 2

  # Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)
  #
  # type: uint64
  # env var: LOTUS_SEALING_MAXSEALINGSECTORS
  #MaxSealingSectors = 0

  # Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)
  #
  # type: uint64
  # env var: LOTUS_SEALING_MAXSEALINGSECTORSFORDEALS
  #MaxSealingSectorsForDeals = 0

  # Prefer creating new sectors even if there are sectors Available for upgrading.
  # This setting, combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals, makes it
  # possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing
  # flow when the volume of storage deals is lower.
  #
  # type: bool
  # env var: LOTUS_SEALING_PREFERNEWSECTORSFORDEALS
  #PreferNewSectorsForDeals = false

  # Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)
  #
  # type: uint64
  # env var: LOTUS_SEALING_MAXUPGRADINGSECTORS
  #MaxUpgradingSectors = 0

  # When set to a non-zero value, the minimum number of epochs until sector expiration required for sectors to be considered
  # for upgrades (0 = DealMinDuration = 180 days = 518400 epochs)
  #
  # Note that if all deals waiting in the input queue have lifetimes longer than this value, upgrade sectors will be
  # required to have an expiration of at least the soonest-ending deal.
  #
  # type: uint64
  # env var: LOTUS_SEALING_MINUPGRADESECTOREXPIRATION
  #MinUpgradeSectorExpiration = 0

  # DEPRECATED: Target expiration is no longer used
  #
  # type: uint64
  # env var: LOTUS_SEALING_MINTARGETUPGRADESECTOREXPIRATION
  #MinTargetUpgradeSectorExpiration = 0

  # CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
  # live before it must be extended or converted into a sector containing deals before it is
  # terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21).
  #
  # type: Duration
  # env var: LOTUS_SEALING_COMMITTEDCAPACITYSECTORLIFETIME
  #CommittedCapacitySectorLifetime = "12960h0m0s"

  # Period of time that a newly created sector will wait for more deals to be packed into it before it starts to seal.
  # Sectors which are fully filled will start sealing immediately.
  #
  # type: Duration
  # env var: LOTUS_SEALING_WAITDEALSDELAY
  #WaitDealsDelay = "6h0m0s"
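
  # Illustrative example (not part of the defaults): for a higher deal-ingestion
  # rate, allow more sectors to wait for deals, but cut the wait so
  # partially-filled sectors start sealing sooner.
  #MaxWaitDealsSectors = 4
  #WaitDealsDelay = "1h0m0s"
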
  # Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
  # avoid the relatively high cost of unsealing the data later, at the cost of more storage space.
  #
  # type: bool
  # env var: LOTUS_SEALING_ALWAYSKEEPUNSEALEDCOPY
  #AlwaysKeepUnsealedCopy = true

  # Run sector finalization before submitting the sector proof to the chain
  #
  # type: bool
  # env var: LOTUS_SEALING_FINALIZEEARLY
  #FinalizeEarly = false

  # Whether new sectors are created to pack incoming deals.
  # When this is set to false, no new sectors will be created for sealing incoming deals.
  # This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade.
  #
  # type: bool
  # env var: LOTUS_SEALING_MAKENEWSECTORFORDEALS
  #MakeNewSectorForDeals = true

  # After sealing CC sectors, make them available for upgrading with deals
  #
  # type: bool
  # env var: LOTUS_SEALING_MAKECCSECTORSAVAILABLE
  #MakeCCSectorsAvailable = false

  # Whether to use available miner balance for sector collateral instead of sending it with each message
  #
  # type: bool
  # env var: LOTUS_SEALING_COLLATERALFROMMINERBALANCE
  #CollateralFromMinerBalance = false

  # Minimum available balance to keep in the miner actor before sending it with messages
  #
  # type: types.FIL
  # env var: LOTUS_SEALING_AVAILABLEBALANCEBUFFER
  #AvailableBalanceBuffer = "0 FIL"

  # Don't send collateral with messages even if there is no available balance in the miner actor
  #
  # type: bool
  # env var: LOTUS_SEALING_DISABLECOLLATERALFALLBACK
  #DisableCollateralFallback = false

  # Maximum precommit batch size - batches will be sent immediately above this size
  #
  # type: int
  # env var: LOTUS_SEALING_MAXPRECOMMITBATCH
  #MaxPreCommitBatch = 256

  # How long to wait before submitting a batch after crossing the minimum batch size
  #
  # type: Duration
  # env var: LOTUS_SEALING_PRECOMMITBATCHWAIT
  #PreCommitBatchWait = "24h0m0s"

  # Time buffer for forceful batch submission before sectors/deals in the batch would start expiring
  #
  # type: Duration
  # env var: LOTUS_SEALING_PRECOMMITBATCHSLACK
  #PreCommitBatchSlack = "3h0m0s"

  # Enable / disable commit aggregation (takes effect after nv13)
  #
  # type: bool
  # env var: LOTUS_SEALING_AGGREGATECOMMITS
  #AggregateCommits = true

  # Minimum batched commit size - batches above this size will eventually be sent on a timeout
  #
  # type: int
  # env var: LOTUS_SEALING_MINCOMMITBATCH
  #MinCommitBatch = 4

  # Maximum batched commit size - batches will be sent immediately above this size
  #
  # type: int
  # env var: LOTUS_SEALING_MAXCOMMITBATCH
  #MaxCommitBatch = 819

  # How long to wait before submitting a batch after crossing the minimum batch size
  #
  # type: Duration
  # env var: LOTUS_SEALING_COMMITBATCHWAIT
  #CommitBatchWait = "24h0m0s"

  # Time buffer for forceful batch submission before sectors/deals in the batch would start expiring
  #
  # type: Duration
  # env var: LOTUS_SEALING_COMMITBATCHSLACK
  #CommitBatchSlack = "1h0m0s"
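
  # Illustrative example (not part of the defaults): favor lower commit latency
  # over gas savings by flushing smaller batches sooner.
  #MaxCommitBatch = 64
  #CommitBatchWait = "6h0m0s"
  #CommitBatchSlack = "1h0m0s"
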
  # Network BaseFee below which to stop doing precommit batching, instead
  # sending precommit messages to the chain individually. When the basefee is
  # below this threshold, precommit messages will get sent out immediately.
  #
  # type: types.FIL
  # env var: LOTUS_SEALING_BATCHPRECOMMITABOVEBASEFEE
  #BatchPreCommitAboveBaseFee = "0.00000000032 FIL"

  # Network BaseFee below which to stop doing commit aggregation, instead
  # submitting proofs to the chain individually
  #
  # type: types.FIL
  # env var: LOTUS_SEALING_AGGREGATEABOVEBASEFEE
  #AggregateAboveBaseFee = "0.00000000032 FIL"

  # When submitting several sector prove commit messages simultaneously, this option allows you to
  # stagger the number of prove commits submitted per epoch.
  # This is done because gas estimates for ProveCommits are non-deterministic and increase as a large
  # number of sectors get committed within the same epoch, resulting in occasionally failed messages.
  # Submitting a smaller number of prove commits per epoch reduces the possibility of failed messages.
  #
  # type: uint64
  # env var: LOTUS_SEALING_MAXSECTORPROVECOMMITSSUBMITTEDPEREPOCH
  #MaxSectorProveCommitsSubmittedPerEpoch = 20

  # type: uint64
  # env var: LOTUS_SEALING_TERMINATEBATCHMAX
  #TerminateBatchMax = 100

  # type: uint64
  # env var: LOTUS_SEALING_TERMINATEBATCHMIN
  #TerminateBatchMin = 1

  # type: Duration
  # env var: LOTUS_SEALING_TERMINATEBATCHWAIT
  #TerminateBatchWait = "5m0s"

  # UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.
  #
  # type: bool
  # env var: LOTUS_SEALING_USESYNTHETICPOREP
  #UseSyntheticPoRep = false

[Storage]
  # type: int
  # env var: LOTUS_STORAGE_PARALLELFETCHLIMIT
  #ParallelFetchLimit = 10

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWSECTORDOWNLOAD
  #AllowSectorDownload = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWADDPIECE
  #AllowAddPiece = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWPRECOMMIT1
  #AllowPreCommit1 = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWPRECOMMIT2
  #AllowPreCommit2 = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWCOMMIT
  #AllowCommit = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWUNSEAL
  #AllowUnseal = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE
  #AllowReplicaUpdate = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2
  #AllowProveReplicaUpdate2 = true

  # type: bool
  # env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
  #AllowRegenSectorKey = true

  # LocalWorkerName specifies a custom name for the builtin worker.
  # If set to an empty string (default), the OS hostname will be used.
  #
  # type: string
  # env var: LOTUS_STORAGE_LOCALWORKERNAME
  #LocalWorkerName = ""

  # Assigner specifies the worker assigner to use when scheduling tasks.
  # "utilization" (default) - assign tasks to workers with lowest utilization.
  # "spread" - assign tasks to as many distinct workers as possible.
  #
  # type: string
  # env var: LOTUS_STORAGE_ASSIGNER
  #Assigner = "utilization"

  # DisallowRemoteFinalize when set to true will force all Finalize tasks to
  # run on workers with local access to both long-term storage and the sealing
  # path containing the sector.
  # --
  # WARNING: Only set this if all workers have access to long-term storage
  # paths. If this flag is enabled, and there are workers without long-term
  # storage access, sectors will not be moved from them, and Finalize tasks
  # will appear to be stuck.
  # --
  # If you see stuck Finalize tasks after enabling this setting, check
  # 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
  #
  # type: bool
  # env var: LOTUS_STORAGE_DISALLOWREMOTEFINALIZE
  #DisallowRemoteFinalize = false
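
  # Illustrative example (not part of the defaults): keep the miner process out
  # of compute-heavy sealing tasks, delegating them to attached workers (assumed
  # present), while still allowing it to fetch and serve sectors.
  #AllowAddPiece = false
  #AllowPreCommit1 = false
  #AllowPreCommit2 = false
  #AllowCommit = false
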
  # ResourceFiltering instructs the system which resource filtering strategy
  # to use when evaluating tasks against this worker. An empty value defaults
  # to "hardware".
  #
  # type: ResourceFilteringStrategy
  # env var: LOTUS_STORAGE_RESOURCEFILTERING
  #ResourceFiltering = "hardware"

[Fees]
  # type: types.FIL
  # env var: LOTUS_FEES_MAXPRECOMMITGASFEE
  #MaxPreCommitGasFee = "0.025 FIL"

  # type: types.FIL
  # env var: LOTUS_FEES_MAXCOMMITGASFEE
  #MaxCommitGasFee = "0.05 FIL"

  # type: types.FIL
  # env var: LOTUS_FEES_MAXTERMINATEGASFEE
  #MaxTerminateGasFee = "0.5 FIL"

  # WindowPoSt is a high-value operation, so the default fee should be high.
  #
  # type: types.FIL
  # env var: LOTUS_FEES_MAXWINDOWPOSTGASFEE
  #MaxWindowPoStGasFee = "5 FIL"

  # type: types.FIL
  # env var: LOTUS_FEES_MAXPUBLISHDEALSFEE
  #MaxPublishDealsFee = "0.05 FIL"

  # type: types.FIL
  # env var: LOTUS_FEES_MAXMARKETBALANCEADDFEE
  #MaxMarketBalanceAddFee = "0.007 FIL"

  # type: bool
  # env var: LOTUS_FEES_MAXIMIZEWINDOWPOSTFEECAP
  #MaximizeWindowPoStFeeCap = true

  [Fees.MaxPreCommitBatchGasFee]
    # type: types.FIL
    # env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_BASE
    #Base = "0 FIL"

    # type: types.FIL
    # env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_PERSECTOR
    #PerSector = "0.02 FIL"

  [Fees.MaxCommitBatchGasFee]
    # type: types.FIL
    # env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_BASE
    #Base = "0 FIL"

    # type: types.FIL
    # env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_PERSECTOR
    #PerSector = "0.03 FIL"

[Addresses]
  # Addresses to send PreCommit messages from
  #
  # type: []string
  # env var: LOTUS_ADDRESSES_PRECOMMITCONTROL
  #PreCommitControl = []

  # Addresses to send Commit messages from
  #
  # type: []string
  # env var: LOTUS_ADDRESSES_COMMITCONTROL
  #CommitControl = []

  # type: []string
  # env var: LOTUS_ADDRESSES_TERMINATECONTROL
  #TerminateControl = []

  # type: []string
  # env var: LOTUS_ADDRESSES_DEALPUBLISHCONTROL
  #DealPublishControl = []

  # DisableOwnerFallback disables usage of the owner address for messages
  # sent automatically
  #
  # type: bool
  # env var: LOTUS_ADDRESSES_DISABLEOWNERFALLBACK
  #DisableOwnerFallback = false

  # DisableWorkerFallback disables usage of the worker address for messages
  # sent automatically, if control addresses are configured.
  # A control address that doesn't have enough funds will still be chosen
  # over the worker address if this flag is set.
  #
  # type: bool
  # env var: LOTUS_ADDRESSES_DISABLEWORKERFALLBACK
  #DisableWorkerFallback = false

[DAGStore]
  # Path to the dagstore root directory. This directory contains three
  # subdirectories, which can be symlinked to alternative locations if
  # need be:
  # - ./transients: caches unsealed deals that have been fetched from the
  # storage subsystem for serving retrievals.
  # - ./indices: stores shard indices.
  # - ./datastore: holds the KV store tracking the state of every shard
  # known to the DAG store.
  # Default value: ${LOTUS_MARKETS_PATH}/dagstore (split deployment) or
  # ${LOTUS_MINER_PATH}/dagstore (monolith deployment)
  #
  # type: string
  # env var: LOTUS_DAGSTORE_ROOTDIR
  #RootDir = ""

  # The maximum amount of indexing jobs that can run simultaneously.
  # 0 means unlimited.
  # Default value: 5.
  #
  # type: int
  # env var: LOTUS_DAGSTORE_MAXCONCURRENTINDEX
  #MaxConcurrentIndex = 5

  # The maximum amount of unsealed deals that can be fetched simultaneously
  # from the storage subsystem. 0 means unlimited.
  # Default value: 0 (unlimited).
  #
  # type: int
  # env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES
  #MaxConcurrentReadyFetches = 0
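
  # Illustrative example (not part of the defaults): throttle dagstore
  # background work on a node with slow disks by capping indexing jobs and
  # concurrent fetches.
  #MaxConcurrentIndex = 2
  #MaxConcurrentReadyFetches = 4
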
  # The maximum amount of unseals that can be processed simultaneously
  # from the storage subsystem. 0 means unlimited.
  # Default value: 5.
  #
  # type: int
  # env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS
  #MaxConcurrentUnseals = 5

  # The maximum number of simultaneous inflight API calls to the storage
  # subsystem.
  # Default value: 100.
  #
  # type: int
  # env var: LOTUS_DAGSTORE_MAXCONCURRENCYSTORAGECALLS
  #MaxConcurrencyStorageCalls = 100

  # The time between calls to periodic dagstore GC, in time.Duration string
  # representation, e.g. 1m, 5m, 1h.
  # Default value: 1 minute.
  #
  # type: Duration
  # env var: LOTUS_DAGSTORE_GCINTERVAL
  #GCInterval = "1m0s"

[HarmonyDB]
  # Hosts is a list of hostnames of nodes running YugabyteDB
  # in a cluster. Only one is required.
  #
  # type: []string
  # env var: LOTUS_HARMONYDB_HOSTS
  #Hosts = ["127.0.0.1"]

  # The Yugabyte server's username with full credentials to operate on Lotus' database. Blank for default.
  #
  # type: string
  # env var: LOTUS_HARMONYDB_USERNAME
  #Username = "yugabyte"

  # The password for the related username. Blank for default.
  #
  # type: string
  # env var: LOTUS_HARMONYDB_PASSWORD
  #Password = "yugabyte"

  # The database (logical partition) within Yugabyte. Blank for default.
  #
  # type: string
  # env var: LOTUS_HARMONYDB_DATABASE
  #Database = "yugabyte"

  # The port on which to find Yugabyte. Blank for default.
  #
  # type: string
  # env var: LOTUS_HARMONYDB_PORT
  #Port = "5433"
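
  # Illustrative example (not part of the defaults): a three-node YugabyteDB
  # cluster with hypothetical private addresses and non-default credentials.
  #Hosts = ["10.0.0.11", "10.0.0.12", "10.0.0.13"]
  #Username = "lotus"
  #Password = "change-me"
  #Database = "lotus_harmony"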