[Subsystems]

# EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster
# with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
# machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
# will allow for parallel processing of partitions.
#
# It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
# the need for additional machines. In setups like this it is generally recommended to run
# partitionsPerDeadline+1 machines.
#
# type: bool
#EnableWindowPost = false

# type: int
#WindowPostMaxTasks = 0
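
# Illustrative sketch (values are hypothetical, not defaults): for a miner with 2
# partitions per deadline, the recommendation above suggests running 3 machines
# (partitionsPerDeadline+1), each configured with something like:
#
#   EnableWindowPost = true
#   WindowPostMaxTasks = 2
#
# Like the other MaxTasks limits in this section, actual concurrency is also bounded
# by the resources available on each machine.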

# EnableWinningPost enables winning post to be executed on this curio instance.
# Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
# It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
# documentation.
#
# type: bool
#EnableWinningPost = false

# type: int
#WinningPostMaxTasks = 0

# EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching
# pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is
# only applicable when integrating with boost, and should be enabled on nodes which will hold deal data
# from boost until sectors containing the related pieces have the TreeD/TreeR constructed.
# Note that future Curio implementations will have a separate task type for fetching pieces from the internet.
#
# type: bool
#EnableParkPiece = false

# type: int
#ParkPieceMaxTasks = 0
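
# Illustrative sketch (values are hypothetical, not defaults): a node meant to hold
# boost deal data until TreeD/TreeR are built might enable piece parking, e.g.:
#
#   EnableParkPiece = true
#   ParkPieceMaxTasks = 4
#
# Size the task limit to the node's scratch space and network bandwidth.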

# EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
# creating 11 layer files in the sector cache directory.
#
# SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
# unsealed data (CommD), the sector number, the miner ID, and the seal proof type.
# Its outputs are the 11 layer files in the sector cache directory.
#
# In lotus-miner this was run as part of PreCommit1.
#
# type: bool
#EnableSealSDR = false

# The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#SealSDRMaxTasks = 0
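
# Illustrative sketch (values are hypothetical, not defaults): a CPU-heavy machine
# dedicated to the first pipeline stage might run only SDR, e.g.:
#
#   EnableSealSDR = true
#   SealSDRMaxTasks = 4
#
# SDR is a long sequential computation, so the practical limit is set by available
# cores and memory rather than by this value alone.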

# EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
# This task handles encoding of unsealed data into the last SDR layer and building
# of TreeR, TreeC and TreeD.
#
# This task runs after SDR:
# TreeD is first computed with optional input of unsealed data
# TreeR is computed from replica, which is first computed as field
# addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
# TreeC is computed from the 11 SDR layers
# The 3 trees will later be used to compute the PoRep proof.
#
# In case of SyntheticPoRep, challenges for PoRep will be pre-generated at this step, and trees and layers
# will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk),
# then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
# savings between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step).
#
# In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
#
# Note that nodes with SDRTrees enabled will also handle Finalize tasks,
# which just remove unneeded tree data after PoRep is computed.
#
# type: bool
#EnableSealSDRTrees = false

# The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#SealSDRTreesMaxTasks = 0
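
# Illustrative sketch (values are hypothetical, not defaults): a machine with fast
# local storage handling tree building might use:
#
#   EnableSealSDRTrees = true
#   SealSDRTreesMaxTasks = 1
#
# Remember that this node will also handle Finalize tasks for the sector cache files
# it holds, as noted above.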

# FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
# The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
# machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
# Finalize will run in parallel with the SubmitCommitMsg task.
#
# type: int
#FinalizeMaxTasks = 0

# EnableSendPrecommitMsg enables the sending of precommit messages to the chain
# from this curio instance.
# This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message
#
# type: bool
#EnableSendPrecommitMsg = false

# EnablePoRepProof enables the computation of the PoRep proof
#
# This task runs after the interactive-porep seed becomes available, which happens 150 epochs (75min) after the
# precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
# requested from the machine which holds sector cache files, which most likely is the machine which ran the SDRTrees
# task.
#
# In lotus-miner this was Commit1 / Commit2
#
# type: bool
#EnablePoRepProof = false

# The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#PoRepProofMaxTasks = 0
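
# Illustrative sketch (values are hypothetical, not defaults): a GPU machine dedicated
# to computing PoRep proofs might use:
#
#   EnablePoRepProof = true
#   PoRepProofMaxTasks = 2
#
# Proof computation is GPU-bound, so size this to GPU count and memory.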

# EnableSendCommitMsg enables the sending of commit messages to the chain
# from this curio instance.
#
# type: bool
#EnableSendCommitMsg = false

# Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).
#
# type: bool
#RequireActivationSuccess = true

# Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).
#
# type: bool
#RequireNotificationSuccess = true

# EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
# This task should only be enabled on nodes with long-term storage.
#
# The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
# SDRTrees machine into long-term storage. This task runs after the Finalize task.
#
# type: bool
#EnableMoveStorage = false

# The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine. It is recommended that this value is set to a number which
# uses all available network (or disk) bandwidth on the machine without causing bottlenecks.
#
# type: int
#MoveStorageMaxTasks = 0
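
# Illustrative sketch (values are hypothetical, not defaults): a node attached to
# long-term storage might run:
#
#   EnableMoveStorage = true
#   MoveStorageMaxTasks = 8
#
# As noted above, pick a task count that saturates the node's network or disk
# bandwidth without causing bottlenecks.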

# BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests.
# This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations.
# Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP.
# Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified.
#
# When a market node like boost gives Curio's market RPC a deal to place into a sector, Curio will first store the
# deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one
# node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data.
# This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was
# received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for
# the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are
# sealed.
#
# To get API info for boost configuration run 'curio market rpc-info'
#
# NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on
# a machine which handles ParkPiece tasks.
#
# type: []string
#BoostAdapters = []
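
# Illustrative example using the "actor:ip:port" format documented above (the actor ID
# and private IP are placeholders):
#
#   BoostAdapters = ["f0123:10.0.0.2:32100"]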

# EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should
# only need to be run on a single machine in the cluster.
#
# type: bool
#EnableWebGui = false

# The address that should listen for Web GUI requests.
#
# type: string
#GuiAddress = "0.0.0.0:4701"
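
# Illustrative example (the IP is a placeholder for the machine's private address):
# to expose the GUI on a specific interface instead of all interfaces:
#
#   GuiAddress = "10.0.0.2:4701"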

[Fees]

# type: types.FIL
#DefaultMaxFee = "0.07 FIL"

# type: types.FIL
#MaxPreCommitGasFee = "0.025 FIL"

# type: types.FIL
#MaxCommitGasFee = "0.05 FIL"

# type: types.FIL
#MaxTerminateGasFee = "0.5 FIL"

# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
#MaxWindowPoStGasFee = "5 FIL"

# type: types.FIL
#MaxPublishDealsFee = "0.05 FIL"

[Fees.MaxPreCommitBatchGasFee]

# type: types.FIL
#Base = "0 FIL"

# type: types.FIL
#PerSector = "0.02 FIL"

[Fees.MaxCommitBatchGasFee]

# type: types.FIL
#Base = "0 FIL"

# type: types.FIL
#PerSector = "0.03 FIL"
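
# Worked example (assuming the batch fee cap is computed as Base + PerSector * nSectors,
# as the Base/PerSector split suggests): with the defaults above, a commit batch of 10
# sectors would be capped at 0 FIL + 10 * 0.03 FIL = 0.3 FIL, and a precommit batch of
# 10 sectors at 0 FIL + 10 * 0.02 FIL = 0.2 FIL.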

[[Addresses]]

#PreCommitControl = []

#CommitControl = []

#TerminateControl = []

#DisableOwnerFallback = false

#DisableWorkerFallback = false

#MinerAddresses = []

[Proving]

# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
#ParallelCheckLimit = 32

# Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
#SingleCheckTimeout = "10m0s"

# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
#PartitionCheckTimeout = "20m0s"

# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
# the builtin logic not skipping snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant; with PoSt workers, challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before the recovery declaration message is
# sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableWDPoStPreChecks = false

# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
#MaxPartitionsPerPoStMessage = 0

# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (e.g. 1);
# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have lower gas limit)
#
# type: int
#MaxPartitionsPerRecoveryMessage = 0
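
# Illustrative example: if recovery messages are hitting the block gas limit, as
# described above, a single partition per recovery message can be forced with:
#
#   MaxPartitionsPerRecoveryMessage = 1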

# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
# with recovering sectors in the post message
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# type: bool
#SingleRecoveringPartitionPerPostMessage = false

[Ingest]

# Maximum number of sectors that can be queued waiting for deals to start processing.
# 0 = unlimited
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
# The DealSector queue includes deals which are ready to enter the sealing pipeline but are not yet part of it -
# the size of this queue will also impact the maximum number of ParkPiece tasks which can run concurrently.
# The DealSector queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
#
# type: int
#MaxQueueDealSector = 8

# Maximum number of sectors that can be queued waiting for SDR to start processing.
# 0 = unlimited
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
# The SDR queue includes deals which are in the process of entering the sealing pipeline. In case of the SDR tasks it is
# possible that this queue grows more than this limit (CC sectors); the backpressure is only applied to sectors
# entering the pipeline.
#
# type: int
#MaxQueueSDR = 8

# Maximum number of sectors that can be queued waiting for SDRTrees to start processing.
# 0 = unlimited
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
# In case of the trees tasks it is possible that this queue grows more than this limit; the backpressure is only
# applied to sectors entering the pipeline.
#
# type: int
#MaxQueueTrees = 0

# Maximum number of sectors that can be queued waiting for PoRep to start processing.
# 0 = unlimited
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
# Like with the trees tasks, it is possible that this queue grows more than this limit; the backpressure is only
# applied to sectors entering the pipeline.
#
# type: int
#MaxQueuePoRep = 0

# Maximum time an open deal sector should wait for more deals before it starts sealing
#
# type: Duration
#MaxDealWaitTime = "1h0m0s"
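
# Illustrative sketch (values are hypothetical, not recommendations): to push
# backpressure to the front of the pipeline, tighten the DealSector queue (the primary
# backpressure mechanism, per the note above) and leave the later queues unbounded:
#
#   [Ingest]
#   MaxQueueDealSector = 4
#   MaxQueueSDR = 8
#   MaxQueueTrees = 0
#   MaxQueuePoRep = 0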

[Journal]

# Events of the form: "system1:event1,system1:event2[,...]"
#
# type: string
#DisabledEvents = ""

[Apis]

# RPC Secret for the storage subsystem.
# If integrating with lotus-miner this must match the value from
# cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey
#
# type: string
#StorageRPCSecret = ""

[Alerting]

# PagerDutyEventURL is the URL for the PagerDuty.com Events API v2. Events sent to this API URL are ultimately
# routed to a PagerDuty.com service and processed.
# The default is sufficient for integration with the stock commercial PagerDuty.com service.
#
# type: string
#PagerDutyEventURL = "https://events.pagerduty.com/v2/enqueue"

# PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service
# identifier in the integration page for the service.
#
# type: string
#PageDutyIntegrationKey = ""

# MinimumWalletBalance is the minimum balance for all active wallets. If the balance is below this value, an
# alert will be triggered for the wallet
#
# type: types.FIL
#MinimumWalletBalance = "5 FIL"
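
# Illustrative sketch (the integration key is a placeholder, not a real value): a
# minimal PagerDuty alerting setup might look like:
#
#   [Alerting]
#   PageDutyIntegrationKey = "0123456789abcdef0123456789abcdef"
#   MinimumWalletBalance = "10 FIL"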