lotus/node/config/def.go

package config

import (
	"encoding"
	"os"
	"strconv"
	"time"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"

	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/types"
	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
)

const (
	// RetrievalPricingDefaultMode configures the node to use the default retrieval pricing policy.
	RetrievalPricingDefaultMode = "default"
	// RetrievalPricingExternalMode configures the node to use the external retrieval pricing script
	// configured by the user.
	RetrievalPricingExternalMode = "external"
)

// MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating
// CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density.
var MaxTraversalLinks uint64 = 32 * (1 << 20)

func init() {
	// Allow the traversal budget to be overridden via the LOTUS_MAX_TRAVERSAL_LINKS
	// environment variable; an unset or non-numeric value leaves the default in place.
	if envMaxTraversal, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil {
		MaxTraversalLinks = envMaxTraversal
	}
}

// FeeForSectors returns the fee cap for a batched message covering nSectors
// sectors: the configured Base fee plus PerSector for each sector in the batch.
func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
	return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
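
// A minimal usage sketch: with the default MaxCommitBatchGasFee defined below
// (0 FIL base, 0.03 FIL per sector), the cap for a hypothetical 10-sector
// aggregate works out to 0 + 10*0.03 = 0.3 FIL.
//
//	fees := DefaultStorageMiner().Fees
//	maxFee := fees.MaxCommitBatchGasFee.FeeForSectors(10)
//	// maxFee is an abi.TokenAmount of 0.3 FIL (3e17 attoFIL)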

func defCommon() Common {
	return Common{
		API: API{
			ListenAddress: "/ip4/127.0.0.1/tcp/1234/http",
			Timeout: Duration(30 * time.Second),
		},
		Logging: Logging{
			SubsystemLevels: map[string]string{
				"example-subsystem": "INFO",
			},
		},
		Libp2p: Libp2p{
			ListenAddresses: []string{
				"/ip4/0.0.0.0/tcp/0",
				"/ip6/::/tcp/0",
			},
			AnnounceAddresses: []string{},
			NoAnnounceAddresses: []string{},

			ConnMgrLow: 150,
			ConnMgrHigh: 180,
			ConnMgrGrace: Duration(20 * time.Second),
		},
		Pubsub: Pubsub{
			Bootstrapper: false,
			DirectPeers: nil,
		},
	}
}

var DefaultDefaultMaxFee = types.MustParseFIL("0.07")
var DefaultSimultaneousTransfers = uint64(20)

// DefaultFullNode returns the default config
func DefaultFullNode() *FullNode {
	return &FullNode{
		Common: defCommon(),
		Fees: FeeConfig{
			DefaultMaxFee: DefaultDefaultMaxFee,
		},
		Client: Client{
			SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
			SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
		},
		Chainstore: Chainstore{
			EnableSplitstore: false,
			Splitstore: Splitstore{
				ColdStoreType: "universal",
				HotStoreType: "badger",
				MarkSetType: "badger",
				HotStoreFullGCFrequency: 20,
			},
		},
	}
}
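
// One way to inspect these defaults is to encode them as TOML. An illustrative
// sketch using the github.com/BurntSushi/toml encoder (an assumption of the
// example; this file does not import it):
//
//	cfg := DefaultFullNode()
//	_ = toml.NewEncoder(os.Stdout).Encode(cfg)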

func DefaultStorageMiner() *StorageMiner {
	cfg := &StorageMiner{
		Common: defCommon(),

		Sealing: SealingConfig{
			MaxWaitDealsSectors: 2, // 64G with 32G sectors
			MaxSealingSectors: 0,
			MaxSealingSectorsForDeals: 0,
			WaitDealsDelay: Duration(time.Hour * 6),
			AlwaysKeepUnsealedCopy: true,
			FinalizeEarly: false,
			MakeNewSectorForDeals: true,

			CollateralFromMinerBalance: false,
			AvailableBalanceBuffer: types.FIL(big.Zero()),
			DisableCollateralFallback: false,

			BatchPreCommits: true,
			MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
			PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
			// XXX snap deals wait deals slack if first
			PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration

			CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)),

			AggregateCommits: true,
			MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
			MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
			CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
			CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration

			BatchPreCommitAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
			AggregateAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL

			TerminateBatchMin: 1,
			TerminateBatchMax: 100,
			TerminateBatchWait: Duration(5 * time.Minute),
		},

		Storage: sectorstorage.SealerConfig{
			AllowAddPiece: true,
			AllowPreCommit1: true,
			AllowPreCommit2: true,
			AllowCommit: true,
			AllowUnseal: true,
			AllowReplicaUpdate: true,
			AllowProveReplicaUpdate2: true,
			AllowRegenSectorKey: true,

			// Default to 10 - tcp should still be able to figure this out, and
			// it's the ratio between 10gbit / 1gbit
			ParallelFetchLimit: 10,

			// By default use the hardware resource filtering strategy.
			ResourceFiltering: sectorstorage.ResourceFilteringHardware,
		},

		Dealmaking: DealmakingConfig{
			ConsiderOnlineStorageDeals: true,
			ConsiderOfflineStorageDeals: true,
			ConsiderOnlineRetrievalDeals: true,
			ConsiderOfflineRetrievalDeals: true,
			ConsiderVerifiedStorageDeals: true,
			ConsiderUnverifiedStorageDeals: true,
			PieceCidBlocklist: []cid.Cid{},
			// TODO: It'd be nice to set this based on sector size
			MaxDealStartDelay: Duration(time.Hour * 24 * 14),
			ExpectedSealDuration: Duration(time.Hour * 24),
			PublishMsgPeriod: Duration(time.Hour),
			MaxDealsPerPublishMsg: 8,
			MaxProviderCollateralMultiplier: 2,

			SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
			SimultaneousTransfersForStoragePerClient: 0,
			SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,

			StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed

			RetrievalPricing: &RetrievalPricing{
				Strategy: RetrievalPricingDefaultMode,
				Default: &RetrievalPricingDefault{
					VerifiedDealsFreeTransfer: true,
				},
				External: &RetrievalPricingExternal{
					Path: "",
				},
			},
		},

		IndexProvider: IndexProviderConfig{
			Enable: true,
			EntriesCacheCapacity: 1024,
			EntriesChunkSize: 16384,
			TopicName: "/indexer/ingest/mainnet",
			PurgeCacheOnStart: false,
		},

		Subsystems: MinerSubsystemConfig{
			EnableMining: true,
			EnableSealing: true,
			EnableSectorStorage: true,
			EnableMarkets: true,
		},

		Fees: MinerFeeConfig{
			MaxPreCommitGasFee: types.MustParseFIL("0.025"),
			MaxCommitGasFee: types.MustParseFIL("0.05"),

			MaxPreCommitBatchGasFee: BatchFeeConfig{
				Base: types.MustParseFIL("0"),
				PerSector: types.MustParseFIL("0.02"),
			},
			MaxCommitBatchGasFee: BatchFeeConfig{
				Base: types.MustParseFIL("0"),
				PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee
			},

			MaxTerminateGasFee: types.MustParseFIL("0.5"),
			MaxWindowPoStGasFee: types.MustParseFIL("5"),
			MaxPublishDealsFee: types.MustParseFIL("0.05"),
			MaxMarketBalanceAddFee: types.MustParseFIL("0.007"),
		},

		Addresses: MinerAddressConfig{
			PreCommitControl: []string{},
			CommitControl: []string{},
			TerminateControl: []string{},
			DealPublishControl: []string{},
		},

		DAGStore: DAGStoreConfig{
			MaxConcurrentIndex: 5,
			MaxConcurrencyStorageCalls: 100,
			MaxConcurrentUnseals: 5,
			GCInterval: Duration(1 * time.Minute),
		},
	}

	cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
	cfg.Common.API.RemoteListenAddress = "127.0.0.1:2345"

	return cfg
}
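
// Callers typically start from these defaults and override individual fields
// before persisting the config. An illustrative sketch (the values and the
// script path are arbitrary examples, not recommendations):
//
//	cfg := DefaultStorageMiner()
//	cfg.Sealing.MaxSealingSectorsForDeals = 4
//	cfg.Dealmaking.RetrievalPricing.Strategy = RetrievalPricingExternalMode
//	cfg.Dealmaking.RetrievalPricing.External.Path = "/path/to/pricing/script"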

var _ encoding.TextMarshaler = (*Duration)(nil)
var _ encoding.TextUnmarshaler = (*Duration)(nil)

// Duration is a wrapper type for time.Duration
// for decoding and encoding from/to TOML
type Duration time.Duration

// UnmarshalText implements interface for TOML decoding
func (dur *Duration) UnmarshalText(text []byte) error {
	d, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*dur = Duration(d)
	return nil
}

// MarshalText implements interface for TOML encoding
func (dur Duration) MarshalText() ([]byte, error) {
	d := time.Duration(dur)
	return []byte(d.String()), nil
}
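
// Round-trip sketch for Duration's text encoding: values parse with
// time.ParseDuration syntax and marshal back via time.Duration.String.
//
//	var d Duration
//	_ = d.UnmarshalText([]byte("30s")) // d == Duration(30 * time.Second)
//	out, _ := d.MarshalText()          // out == []byte("30s")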