# lotus/documentation/en/default-lotus-miner-config.toml

[API]
# Binding address for the Lotus API
#
# type: string
# env var: LOTUS_API_LISTENADDRESS
#ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
# type: string
# env var: LOTUS_API_REMOTELISTENADDRESS
#RemoteListenAddress = "127.0.0.1:2345"
# type: Duration
# env var: LOTUS_API_TIMEOUT
#Timeout = "30s"
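# Example (illustrative only): to make the API reachable by remote workers, one
# might bind to all interfaces and advertise the node's public address. The
# address 203.0.113.10 is a placeholder; firewall the port appropriately.
#
#ListenAddress = "/ip4/0.0.0.0/tcp/2345/http"
#RemoteListenAddress = "203.0.113.10:2345"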
[Backup]
# When set to true, disables the metadata log (.lotus/kvlog). This can save
# disk space by reducing metadata redundancy.
#
# Note that in case of metadata corruption it might be much harder to recover
# your node if the metadata log is disabled
#
# type: bool
# env var: LOTUS_BACKUP_DISABLEMETADATALOG
#DisableMetadataLog = true
[Logging]
[Logging.SubsystemLevels]
# env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM
#example-subsystem = "INFO"
[Libp2p]
# Binding address for the libp2p host - 0 means random port.
# Format: multiaddress; see https://multiformats.io/multiaddr/
#
# type: []string
# env var: LOTUS_LIBP2P_LISTENADDRESSES
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/udp/0/quic-v1/webtransport"]
# Addresses to explicitly announce to other peers. If not specified,
# all interface addresses are announced
# Format: multiaddress
#
# type: []string
# env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES
#AnnounceAddresses = []
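# Example (illustrative only): a node behind NAT with a known public IP might
# announce just that address; 203.0.113.10 and port 24001 are placeholders.
#
#AnnounceAddresses = ["/ip4/203.0.113.10/tcp/24001"]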
# Addresses to not announce
# Format: multiaddress
#
# type: []string
# env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES
#NoAnnounceAddresses = []
# When not disabled (default), lotus asks NAT devices (e.g., routers) to
# open up an external port and forward it to the port lotus is running on.
# When this works (i.e., when your router supports NAT port forwarding),
# it makes the local lotus node accessible from the public internet
#
# type: bool
# env var: LOTUS_LIBP2P_DISABLENATPORTMAP
#DisableNatPortMap = false
# ConnMgrLow is the number of connections that the basic connection manager
# will trim down to.
#
# type: uint
# env var: LOTUS_LIBP2P_CONNMGRLOW
#ConnMgrLow = 150
# ConnMgrHigh is the number of connections that, when exceeded, will trigger
# a connection GC operation. Note: protected/recently formed connections don't
# count towards this limit.
#
# type: uint
# env var: LOTUS_LIBP2P_CONNMGRHIGH
#ConnMgrHigh = 180
# ConnMgrGrace is a time duration that new connections are immune from being
# closed by the connection manager.
#
# type: Duration
# env var: LOTUS_LIBP2P_CONNMGRGRACE
#ConnMgrGrace = "20s"
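# Example (illustrative only): a sketch of a more generously connected node;
# these numbers are not a tested recommendation. Connections above ConnMgrHigh
# trigger a GC pass down to ConnMgrLow, sparing those newer than ConnMgrGrace.
#
#ConnMgrLow = 300
#ConnMgrHigh = 400
#ConnMgrGrace = "1m0s"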
[Pubsub]
# Run the node in bootstrap-node mode
#
# type: bool
# env var: LOTUS_PUBSUB_BOOTSTRAPPER
#Bootstrapper = false
# type: string
# env var: LOTUS_PUBSUB_REMOTETRACER
#RemoteTracer = ""
# Path to the file that will be used to output tracer content in JSON format.
# If present, the tracer will save data to the defined file.
# Format: file path
#
# type: string
# env var: LOTUS_PUBSUB_JSONTRACER
#JsonTracer = ""
# Connection string for an elasticsearch instance.
# If present, the tracer will save data to elasticsearch.
# Format: https://<username>:<password>@<elasticsearch_url>:<port>/
#
# type: string
# env var: LOTUS_PUBSUB_ELASTICSEARCHTRACER
#ElasticSearchTracer = ""
# Name of elasticsearch index that will be used to save tracer data.
# This property is used only if the ElasticSearchTracer property is set.
#
# type: string
# env var: LOTUS_PUBSUB_ELASTICSEARCHINDEX
#ElasticSearchIndex = ""
# Auth token that will be passed with logs to elasticsearch - used for weighted peer scoring.
#
# type: string
# env var: LOTUS_PUBSUB_TRACERSOURCEAUTH
#TracerSourceAuth = ""
[Subsystems]
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLEMINING
#EnableMining = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESEALING
#EnableSealing = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESECTORSTORAGE
#EnableSectorStorage = true
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS
#EnableMarkets = false
# When enabled, the sector index will reside in an external database
# as opposed to the local KV store in the miner process
# This is useful to allow workers to bypass the lotus miner to access sector information
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESECTORINDEXDB
#EnableSectorIndexDB = false
# type: string
# env var: LOTUS_SUBSYSTEMS_SEALERAPIINFO
#SealerApiInfo = ""
# type: string
# env var: LOTUS_SUBSYSTEMS_SECTORINDEXAPIINFO
#SectorIndexApiInfo = ""
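# Example (illustrative only): a markets-only process in a split deployment
# might disable the mining/sealing subsystems and point at the sealer node.
# The token and multiaddress below are placeholders, in the usual
# "token:multiaddr" API-info format.
#
#EnableMining = false
#EnableSealing = false
#EnableSectorStorage = false
#EnableMarkets = true
#SealerApiInfo = "<api-token>:/ip4/127.0.0.1/tcp/2345/http"
#SectorIndexApiInfo = "<api-token>:/ip4/127.0.0.1/tcp/2345/http"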
# When window post is enabled, the miner will automatically submit window post proofs
# for all sectors that are eligible for window post
# IF WINDOW POST IS DISABLED, THE MINER WILL NOT SUBMIT WINDOW POST PROOFS
# THIS WILL RESULT IN FAULTS AND PENALTIES IF NO OTHER MECHANISM IS RUNNING
# TO SUBMIT WINDOW POST PROOFS.
# Note: This option entirely disables the window PoSt scheduler,
# not just the builtin PoSt computation (unlike Proving.DisableBuiltinWindowPoSt).
# It will stop lotus-miner from performing any actions related
# to window PoSt, including scheduling, submitting proofs, and recovering
# sectors.
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_DISABLEWINDOWPOST
#DisableWindowPoSt = false
# When winning post is disabled, the miner process will NOT attempt to mine
# blocks. This should only be set when there's an external process mining
# blocks on behalf of the miner.
# When disabled and no external block producers are configured, all potential
# block rewards will be missed!
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_DISABLEWINNINGPOST
#DisableWinningPoSt = false
[Dealmaking]
# When enabled, the miner can accept online deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERONLINESTORAGEDEALS
#ConsiderOnlineStorageDeals = true
# When enabled, the miner can accept offline deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINESTORAGEDEALS
#ConsiderOfflineStorageDeals = true
# When enabled, the miner can accept retrieval deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERONLINERETRIEVALDEALS
#ConsiderOnlineRetrievalDeals = true
# When enabled, the miner can accept offline retrieval deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINERETRIEVALDEALS
#ConsiderOfflineRetrievalDeals = true
# When enabled, the miner can accept verified deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERVERIFIEDSTORAGEDEALS
#ConsiderVerifiedStorageDeals = true
# When enabled, the miner can accept unverified deals
#
# type: bool
# env var: LOTUS_DEALMAKING_CONSIDERUNVERIFIEDSTORAGEDEALS
#ConsiderUnverifiedStorageDeals = true
# A list of Data CIDs to reject when making deals
#
# type: []cid.Cid
# env var: LOTUS_DEALMAKING_PIECECIDBLOCKLIST
#PieceCidBlocklist = []
# Maximum expected amount of time getting the deal into a sealed sector will take.
# This includes the time the deal will need to get transferred and published
# before being assigned to a sector
#
# type: Duration
# env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION
#ExpectedSealDuration = "24h0m0s"
# Maximum amount of time a proposed deal's StartEpoch can be in the future
#
# type: Duration
# env var: LOTUS_DEALMAKING_MAXDEALSTARTDELAY
#MaxDealStartDelay = "336h0m0s"
# When a deal is ready to publish, the amount of time to wait for more
# deals to be ready to publish before publishing them all as a batch
#
# type: Duration
# env var: LOTUS_DEALMAKING_PUBLISHMSGPERIOD
#PublishMsgPeriod = "1h0m0s"
# The maximum number of deals to include in a single PublishStorageDeals
# message
#
# type: uint64
# env var: LOTUS_DEALMAKING_MAXDEALSPERPUBLISHMSG
#MaxDealsPerPublishMsg = 8
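# Example (illustrative only): with the defaults above, the first deal that
# becomes ready to publish waits up to 1 hour for up to 7 more deals, then all
# queued deals go out in one PublishStorageDeals message. A miner with steady
# deal flow might trade latency for batching like this; a sketch, not a
# recommendation:
#
#PublishMsgPeriod = "30m0s"
#MaxDealsPerPublishMsg = 16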
# The maximum collateral that the provider will put up against a deal,
# as a multiplier of the minimum collateral bound
#
# type: uint64
# env var: LOTUS_DEALMAKING_MAXPROVIDERCOLLATERALMULTIPLIER
#MaxProviderCollateralMultiplier = 2
# The maximum allowed disk usage size in bytes of staging deals not yet
# passed to the sealing node by the markets service. 0 is unlimited.
#
# type: int64
# env var: LOTUS_DEALMAKING_MAXSTAGINGDEALSBYTES
#MaxStagingDealsBytes = 0
# The maximum number of parallel online data transfers for storage deals
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE
#SimultaneousTransfersForStorage = 20
# The maximum number of simultaneous data transfers from any single client
# for storage deals.
# Unset by default (0), and values higher than SimultaneousTransfersForStorage
# will have no effect; i.e. the total number of simultaneous data transfers
# across all storage clients is bound by SimultaneousTransfersForStorage
# regardless of this number.
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT
#SimultaneousTransfersForStoragePerClient = 0
# The maximum number of parallel online data transfers for retrieval deals
#
# type: uint64
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORRETRIEVAL
#SimultaneousTransfersForRetrieval = 20
# Minimum start epoch buffer to give time for sealing of a sector with a deal.
#
# type: uint64
# env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER
#StartEpochSealingBuffer = 480
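# Worked example (assuming mainnet's 30-second epochs): the default buffer of
# 480 epochs is 480 * 30s = 14400s = 4 hours between deal acceptance and the
# deal's start epoch.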
# A command used for fine-grained evaluation of storage deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_FILTER
#Filter = ""
# A command used for fine-grained evaluation of retrieval deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_RETRIEVALFILTER
#RetrievalFilter = ""
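# Example (illustrative only): both filters receive the deal proposal as JSON
# on stdin and signal acceptance with a zero exit code. The script paths below
# are hypothetical.
#
#Filter = "/usr/local/bin/storage-deal-filter.sh"
#RetrievalFilter = "/usr/local/bin/retrieval-deal-filter.sh"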
[Dealmaking.RetrievalPricing]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_STRATEGY
#Strategy = "default"
[Dealmaking.RetrievalPricing.Default]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_DEFAULT_VERIFIEDDEALSFREETRANSFER
#VerifiedDealsFreeTransfer = true
[Dealmaking.RetrievalPricing.External]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_EXTERNAL_PATH
#Path = ""
[IndexProvider]
# Enable sets whether to enable indexing announcements to the network and expose endpoints that
# allow indexer nodes to process announcements. Enabled by default.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_ENABLE
#Enable = true
# EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
# entries. Defaults to 1024 if not specified. The cache is evicted using an LRU policy. The
# maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
# the length of multihashes being advertised. For example, advertising 128-bit long multihashes
# with the default EntriesCacheCapacity and EntriesChunkSize means the cache size can grow to
# 256MiB when full.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCACHECAPACITY
#EntriesCacheCapacity = 1024
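# Worked example: with 128-bit (16-byte) multihashes and the defaults, a full
# cache holds 1024 chunks * 16384 multihashes * 16 B = 268435456 B = 256 MiB.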
# EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
# Defaults to 16384 if not specified. Note that chunks are chained together for indexing
# advertisements that include more multihashes than the configured EntriesChunkSize.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCHUNKSIZE
#EntriesChunkSize = 16384
# TopicName sets the topic name on which the changes to the advertised content are announced.
# If not explicitly specified, the topic name is automatically inferred from the network name
# in the following format: '/indexer/ingest/<network-name>'
# Defaults to empty, which implies the topic name is inferred from network name.
#
# type: string
# env var: LOTUS_INDEXPROVIDER_TOPICNAME
#TopicName = ""
# PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
# starts. By default, the cache is rehydrated from previously cached entries stored in the
# datastore, if any are present.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_PURGECACHEONSTART
#PurgeCacheOnStart = false
[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
# env var: LOTUS_PROVING_PARALLELCHECKLIMIT
#ParallelCheckLimit = 32
# Maximum amount of time a proving pre-check can take for a sector. If the check times out, the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
# env var: LOTUS_PROVING_SINGLECHECKTIMEOUT
#SingleCheckTimeout = "10m0s"
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
# env var: LOTUS_PROVING_PARTITIONCHECKTIMEOUT
#PartitionCheckTimeout = "20m0s"
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINDOWPOST
#DisableBuiltinWindowPoSt = false
# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINNINGPOST
#DisableBuiltinWinningPoSt = false
# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
# the builtin logic not skipping snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant; with PoSt workers, challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before the recovery declaration
# message is sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS
#DisableWDPoStPreChecks = false
# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
# env var: LOTUS_PROVING_MAXPARTITIONSPERPOSTMESSAGE
#MaxPartitionsPerPoStMessage = 0
# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (e.g. 1);
# note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have a lower gas limit)
#
# type: int
# env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
#MaxPartitionsPerRecoveryMessage = 0
# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to put only the single partition
# with recovering sectors in the PoSt message
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# type: bool
# env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
#SingleRecoveringPartitionPerPostMessage = false
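# Example (illustrative only): a miner with many faulty sectors whose recovery
# messages keep exceeding the block gas limit might temporarily set:
#
#MaxPartitionsPerRecoveryMessage = 1
#SingleRecoveringPartitionPerPostMessage = true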
[Sealing]
# Upper bound on how many sectors can be waiting for more deals to be packed into them before sealing begins, at any given time.
# If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors new sectors will be created.
# If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel
# Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency
# 0 = no limit
#
# type: uint64
# env var: LOTUS_SEALING_MAXWAITDEALSSECTORS
#MaxWaitDealsSectors = 2
# Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)
#
# type: uint64
# env var: LOTUS_SEALING_MAXSEALINGSECTORS
#MaxSealingSectors = 0
# Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)
#
# type: uint64
# env var: LOTUS_SEALING_MAXSEALINGSECTORSFORDEALS
#MaxSealingSectorsForDeals = 0
# Prefer creating new sectors even if there are sectors Available for upgrading.
# This setting combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals makes it
# possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing
# flow when the volume of storage deals is lower.
#
# type: bool
# env var: LOTUS_SEALING_PREFERNEWSECTORSFORDEALS
#PreferNewSectorsForDeals = false
# Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)
#
# type: uint64
# env var: LOTUS_SEALING_MAXUPGRADINGSECTORS
#MaxUpgradingSectors = 0
# When set to a non-zero value, minimum number of epochs until sector expiration required for sectors to be considered
# for upgrades (0 = DealMinDuration = 180 days = 518400 epochs)
#
# Note that if all deals waiting in the input queue have lifetimes longer than this value, upgrade sectors will be
# required to have expiration of at least the soonest-ending deal
#
# type: uint64
# env var: LOTUS_SEALING_MINUPGRADESECTOREXPIRATION
#MinUpgradeSectorExpiration = 0
# DEPRECATED: Target expiration is no longer used
#
# type: uint64
# env var: LOTUS_SEALING_MINTARGETUPGRADESECTOREXPIRATION
#MinTargetUpgradeSectorExpiration = 0
# CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will
# live before it must be extended or converted into a sector containing deals; otherwise
# it is terminated. Value must be between 180 and 1278 days (1278 in nv21, 540 before nv21).
#
# type: Duration
# env var: LOTUS_SEALING_COMMITTEDCAPACITYSECTORLIFETIME
#CommittedCapacitySectorLifetime = "12960h0m0s"
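# Worked example: the default "12960h0m0s" is 12960 / 24 = 540 days; the nv21
# maximum of 1278 days would be written as 1278 * 24 = "30672h0m0s".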
# Period of time that a newly created sector will wait for more deals to be packed into it before it starts to seal.
# Sectors which are fully filled will start sealing immediately
#
# type: Duration
# env var: LOTUS_SEALING_WAITDEALSDELAY
#WaitDealsDelay = "6h0m0s"
# Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner
# avoid the relatively high cost of unsealing the data later, at the cost of more storage space
#
# type: bool
# env var: LOTUS_SEALING_ALWAYSKEEPUNSEALEDCOPY
#AlwaysKeepUnsealedCopy = true
# Run sector finalization before submitting sector proof to the chain
#
# type: bool
# env var: LOTUS_SEALING_FINALIZEEARLY
#FinalizeEarly = false
# Whether new sectors are created to pack incoming deals
# When this is set to false no new sectors will be created for sealing incoming deals
# This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade
#
# type: bool
# env var: LOTUS_SEALING_MAKENEWSECTORFORDEALS
#MakeNewSectorForDeals = true
# After sealing CC sectors, make them available for upgrading with deals
#
# type: bool
# env var: LOTUS_SEALING_MAKECCSECTORSAVAILABLE
#MakeCCSectorsAvailable = false
# Whether to use available miner balance for sector collateral instead of sending it with each message
#
# type: bool
# env var: LOTUS_SEALING_COLLATERALFROMMINERBALANCE
#CollateralFromMinerBalance = false
# Minimum available balance to keep in the miner actor before sending it with messages
#
# type: types.FIL
# env var: LOTUS_SEALING_AVAILABLEBALANCEBUFFER
#AvailableBalanceBuffer = "0 FIL"
# Don't send collateral with messages even if there is no available balance in the miner actor
#
# type: bool
# env var: LOTUS_SEALING_DISABLECOLLATERALFALLBACK
#DisableCollateralFallback = false
# maximum precommit batch size - batches will be sent immediately above this size
#
# type: int
# env var: LOTUS_SEALING_MAXPRECOMMITBATCH
#MaxPreCommitBatch = 256
# how long to wait before submitting a batch after crossing the minimum batch size
#
# type: Duration
# env var: LOTUS_SEALING_PRECOMMITBATCHWAIT
#PreCommitBatchWait = "24h0m0s"
# time buffer for forceful batch submission before sectors/deals in the batch would start expiring
#
# type: Duration
# env var: LOTUS_SEALING_PRECOMMITBATCHSLACK
#PreCommitBatchSlack = "3h0m0s"
# enable / disable commit aggregation (takes effect after nv13)
#
# type: bool
# env var: LOTUS_SEALING_AGGREGATECOMMITS
#AggregateCommits = true
# minimum batched commit size - batches above this size will eventually be sent on a timeout
#
# type: int
# env var: LOTUS_SEALING_MINCOMMITBATCH
#MinCommitBatch = 4
# maximum batched commit size - batches will be sent immediately above this size
#
# type: int
# env var: LOTUS_SEALING_MAXCOMMITBATCH
#MaxCommitBatch = 819
# how long to wait before submitting a batch after crossing the minimum batch size
#
# type: Duration
# env var: LOTUS_SEALING_COMMITBATCHWAIT
#CommitBatchWait = "24h0m0s"
# time buffer for forceful batch submission before sectors/deals in the batch would start expiring
#
# type: Duration
# env var: LOTUS_SEALING_COMMITBATCHSLACK
#CommitBatchSlack = "1h0m0s"
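# Example (illustrative only): to send commit batches sooner at the cost of
# less aggregation, one might shrink the batch and the wait; these values are
# a sketch, not a recommendation.
#
#MaxCommitBatch = 64
#CommitBatchWait = "6h0m0s"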
# network BaseFee below which to stop doing precommit batching, instead
# sending precommit messages to the chain individually. When the basefee is
# below this threshold, precommit messages will get sent out immediately.
#
# type: types.FIL
# env var: LOTUS_SEALING_BATCHPRECOMMITABOVEBASEFEE
#BatchPreCommitAboveBaseFee = "0.00000000032 FIL"
# network BaseFee below which to stop doing commit aggregation, instead
# submitting proofs to the chain individually
#
# type: types.FIL
# env var: LOTUS_SEALING_AGGREGATEABOVEBASEFEE
#AggregateAboveBaseFee = "0.00000000032 FIL"
# When submitting several sector prove commit messages simultaneously, this option allows you to
# stagger the number of prove commits submitted per epoch.
# This is done because gas estimates for ProveCommits are non-deterministic and increase as a large
# number of sectors get committed within the same epoch, occasionally resulting in failed messages.
# Submitting a smaller number of prove commits per epoch reduces the possibility of failed messages
#
# type: uint64
# env var: LOTUS_SEALING_MAXSECTORPROVECOMMITSSUBMITTEDPEREPOCH
#MaxSectorProveCommitsSubmittedPerEpoch = 20
# type: uint64
# env var: LOTUS_SEALING_TERMINATEBATCHMAX
#TerminateBatchMax = 100
# type: uint64
# env var: LOTUS_SEALING_TERMINATEBATCHMIN
#TerminateBatchMin = 1
# type: Duration
# env var: LOTUS_SEALING_TERMINATEBATCHWAIT
#TerminateBatchWait = "5m0s"
# UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.
#
# type: bool
# env var: LOTUS_SEALING_USESYNTHETICPOREP
#UseSyntheticPoRep = false
# Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).
#
# type: bool
# env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESS
#RequireActivationSuccess = false
# Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3).
#
# type: bool
# env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESSUPDATE
#RequireActivationSuccessUpdate = false
# Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).
#
# type: bool
# env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESS
#RequireNotificationSuccess = false
# Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3).
#
# type: bool
# env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESSUPDATE
#RequireNotificationSuccessUpdate = false
[Storage]
# type: int
# env var: LOTUS_STORAGE_PARALLELFETCHLIMIT
#ParallelFetchLimit = 10
# type: bool
# env var: LOTUS_STORAGE_ALLOWSECTORDOWNLOAD
#AllowSectorDownload = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWADDPIECE
#AllowAddPiece = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPRECOMMIT1
#AllowPreCommit1 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPRECOMMIT2
#AllowPreCommit2 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWCOMMIT
#AllowCommit = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWUNSEAL
#AllowUnseal = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE
#AllowReplicaUpdate = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2
#AllowProveReplicaUpdate2 = true
# type: bool
# env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
#AllowRegenSectorKey = true
# LocalWorkerName specifies a custom name for the builtin worker.
# If set to an empty string (default), the OS hostname will be used
#
# type: string
# env var: LOTUS_STORAGE_LOCALWORKERNAME
#LocalWorkerName = ""
# Assigner specifies the worker assigner to use when scheduling tasks.
# "utilization" (default) - assign tasks to workers with lowest utilization.
# "spread" - assign tasks to as many distinct workers as possible.
#
# type: string
# env var: LOTUS_STORAGE_ASSIGNER
#Assigner = "utilization"
# DisallowRemoteFinalize when set to true will force all Finalize tasks to
# run on workers with local access to both long-term storage and the sealing
# path containing the sector.
# --
# WARNING: Only set this if all workers have access to long-term storage
# paths. If this flag is enabled, and there are workers without long-term
# storage access, sectors will not be moved from them, and Finalize tasks
# will appear to be stuck.
# --
# If you see stuck Finalize tasks after enabling this setting, check
# 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
#
# type: bool
# env var: LOTUS_STORAGE_DISALLOWREMOTEFINALIZE
#DisallowRemoteFinalize = false
# ResourceFiltering instructs the system which resource filtering strategy
# to use when evaluating tasks against this worker. An empty value defaults
# to "hardware".
#
# type: ResourceFilteringStrategy
# env var: LOTUS_STORAGE_RESOURCEFILTERING
#ResourceFiltering = "hardware"
[Fees]
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITGASFEE
#MaxPreCommitGasFee = "0.025 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITGASFEE
#MaxCommitGasFee = "0.05 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXTERMINATEGASFEE
#MaxTerminateGasFee = "0.5 FIL"
# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
# env var: LOTUS_FEES_MAXWINDOWPOSTGASFEE
#MaxWindowPoStGasFee = "5 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXPUBLISHDEALSFEE
#MaxPublishDealsFee = "0.05 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXMARKETBALANCEADDFEE
#MaxMarketBalanceAddFee = "0.007 FIL"
# type: bool
# env var: LOTUS_FEES_MAXIMIZEWINDOWPOSTFEECAP
#MaximizeWindowPoStFeeCap = true
[Fees.MaxPreCommitBatchGasFee]
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_BASE
#Base = "0 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_PERSECTOR
#PerSector = "0.02 FIL"
[Fees.MaxCommitBatchGasFee]
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_BASE
#Base = "0 FIL"
# type: types.FIL
# env var: LOTUS_FEES_MAXCOMMITBATCHGASFEE_PERSECTOR
#PerSector = "0.03 FIL"
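# Worked example: these batch fee caps scale with batch size, roughly
# Base + PerSector * sectors. For a 100-sector commit batch with the defaults
# above: 0 + 100 * 0.03 = 3 FIL.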
[Addresses]
# Addresses to send PreCommit messages from
#
# type: []string
# env var: LOTUS_ADDRESSES_PRECOMMITCONTROL
#PreCommitControl = []
# Addresses to send Commit messages from
#
# type: []string
# env var: LOTUS_ADDRESSES_COMMITCONTROL
#CommitControl = []
# type: []string
# env var: LOTUS_ADDRESSES_TERMINATECONTROL
#TerminateControl = []
# type: []string
# env var: LOTUS_ADDRESSES_DEALPUBLISHCONTROL
#DealPublishControl = []
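# Example (illustrative only): dedicated control addresses for automated
# messages, using obviously fake placeholders. Fallback behaviour is governed
# by the two options below.
#
#PreCommitControl = ["<precommit-control-address>"]
#CommitControl = ["<commit-control-address>"]
#TerminateControl = ["<terminate-control-address>"]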
# DisableOwnerFallback disables usage of the owner address for messages
# sent automatically
#
# type: bool
# env var: LOTUS_ADDRESSES_DISABLEOWNERFALLBACK
#DisableOwnerFallback = false
# DisableWorkerFallback disables usage of the worker address for messages
# sent automatically, if control addresses are configured.
# A control address that doesn't have enough funds will still be chosen
# over the worker address if this flag is set.
#
# type: bool
# env var: LOTUS_ADDRESSES_DISABLEWORKERFALLBACK
#DisableWorkerFallback = false
[DAGStore]
# Path to the dagstore root directory. This directory contains three
# subdirectories, which can be symlinked to alternative locations if
# need be:
# - ./transients: caches unsealed deals that have been fetched from the
# storage subsystem for serving retrievals.
# - ./indices: stores shard indices.
# - ./datastore: holds the KV store tracking the state of every shard
# known to the DAG store.
# Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
# <LOTUS_MINER_PATH>/dagstore (monolith deployment)
#
# type: string
# env var: LOTUS_DAGSTORE_ROOTDIR
#RootDir = ""
# The maximum number of indexing jobs that can run simultaneously.
# 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTINDEX
#MaxConcurrentIndex = 5
# The maximum number of unsealed deals that can be fetched simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 0 (unlimited).
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES
#MaxConcurrentReadyFetches = 0
# The maximum number of unseals that can be processed simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS
#MaxConcurrentUnseals = 5
# The maximum number of simultaneous inflight API calls to the storage
# subsystem.
# Default value: 100.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENCYSTORAGECALLS
#MaxConcurrencyStorageCalls = 100
# The time between calls to periodic dagstore GC, in time.Duration string
# representation, e.g. 1m, 5m, 1h.
# Default value: 1 minute.
#
# type: Duration
# env var: LOTUS_DAGSTORE_GCINTERVAL
#GCInterval = "1m0s"
[HarmonyDB]
# Hosts is a list of hostnames of nodes running YugabyteDB
# in a cluster. Only one is required
#
# type: []string
# env var: LOTUS_HARMONYDB_HOSTS
#Hosts = ["127.0.0.1"]
# The Yugabyte server's username with full credentials to operate on Lotus' Database. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_USERNAME
#Username = "yugabyte"
# The password for the related username. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_PASSWORD
#Password = "yugabyte"
# The database (logical partition) within Yugabyte. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_DATABASE
#Database = "yugabyte"
# The port used to reach Yugabyte. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_PORT
#Port = "5433"
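# Example (illustrative only): pointing HarmonyDB at a three-node YugabyteDB
# cluster; hostnames and credentials are placeholders.
#
#Hosts = ["yb1.example.com", "yb2.example.com", "yb3.example.com"]
#Username = "lotus"
#Password = "changeme"
#Database = "lotus"
#Port = "5433"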