From 6376001a50eac815456fb803e36ebbb67a08afac Mon Sep 17 00:00:00 2001
From: "Andrew Jackson (Ajax)"
Date: Mon, 2 Oct 2023 17:08:42 -0500
Subject: [PATCH] config PR changes

---
 base.toml                                | 190 -----------------------
 cmd/lotus-provider/config.go             |  39 +++--
 orjanCheckThisIn.toml                    | 190 -----------------------
 storage/sealer/ffiwrapper/sealer_test.go |  24 +--
 4 files changed, 45 insertions(+), 398 deletions(-)
 delete mode 100644 base.toml
 delete mode 100644 orjanCheckThisIn.toml

diff --git a/base.toml b/base.toml
deleted file mode 100644
index 788aeacab..000000000
--- a/base.toml
+++ /dev/null
@@ -1,190 +0,0 @@
-[Subsystems]
-  # type: bool
-  #EnableWindowPost = false
-
-  # type: bool
-  #EnableWinningPost = false
-
-
-[Fees]
-  # type: types.FIL
-  #DefaultMaxFee = "0.07 FIL"
-
-  # type: types.FIL
-  #MaxPreCommitGasFee = "0.025 FIL"
-
-  # type: types.FIL
-  #MaxCommitGasFee = "0.05 FIL"
-
-  # type: types.FIL
-  #MaxTerminateGasFee = "0.5 FIL"
-
-  # WindowPoSt is a high-value operation, so the default fee should be high.
-  #
-  # type: types.FIL
-  #MaxWindowPoStGasFee = "5 FIL"
-
-  # type: types.FIL
-  #MaxPublishDealsFee = "0.05 FIL"
-
-  [Fees.MaxPreCommitBatchGasFee]
-    # type: types.FIL
-    #Base = "0 FIL"
-
-    # type: types.FIL
-    #PerSector = "0.02 FIL"
-
-  [Fees.MaxCommitBatchGasFee]
-    # type: types.FIL
-    #Base = "0 FIL"
-
-    # type: types.FIL
-    #PerSector = "0.03 FIL"
-
-
-[Addresses]
-  # Addresses to send PreCommit messages from
-  #
-  # type: []string
-  #PreCommitControl = []
-
-  # Addresses to send Commit messages from
-  #
-  # type: []string
-  #CommitControl = []
-
-  # type: []string
-  #TerminateControl = []
-
-  # DisableOwnerFallback disables usage of the owner address for messages
-  # sent automatically
-  #
-  # type: bool
-  #DisableOwnerFallback = false
-
-  # DisableWorkerFallback disables usage of the worker address for messages
-  # sent automatically, if control addresses are configured.
-  # A control address that doesn't have enough funds will still be chosen
-  # over the worker address if this flag is set.
-  #
-  # type: bool
-  #DisableWorkerFallback = false
-
-
-[Proving]
-  # Maximum number of sector checks to run in parallel. (0 = unlimited)
-  #
-  # WARNING: Setting this value too high may make the node crash by running out of stack
-  # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
-  # to late submission.
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: int
-  #ParallelCheckLimit = 0
-
-  # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
-  #
-  # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
-  # test challenge took longer than this timeout
-  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
-  # blocked (e.g. in case of disconnected NFS mount)
-  #
-  # type: Duration
-  #SingleCheckTimeout = "0s"
-
-  # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
-  # the partition which didn't get checked on time will be skipped
-  #
-  # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
-  # test challenge took longer than this timeout
-  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
-  # blocked or slow
-  #
-  # type: Duration
-  #PartitionCheckTimeout = "0s"
-
-  # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
-  #
-  # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
-  # to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: bool
-  #DisableBuiltinWindowPoSt = false
-
-  # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
-  #
-  # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
-  # Before enabling this option, make sure your PoSt workers work correctly.
-  #
-  # type: bool
-  #DisableBuiltinWinningPoSt = false
-
-  # Disable WindowPoSt provable sector readability checks.
-  #
-  # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
-  # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
-  # we're only interested in checking that sector data can be read.
-  #
-  # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
-  # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
-  # the builtin logic not skipping snark computation when some sectors need to be skipped.
-  #
-  # When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
-  # if challenges for some sectors aren't readable, those sectors will just get skipped.
-  #
-  # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
-  # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
-  # be negligible.
-  #
-  # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
-  #
-  # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
-  # sent to the chain
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: bool
-  #DisableWDPoStPreChecks = false
-
-  # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16)
-  #
-  # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
-  #
-  # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-  # means that a single message can prove at most 10 partitions
-  #
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent,
-  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # Setting this value above the network limit has no effect
-  #
-  # type: int
-  #MaxPartitionsPerPoStMessage = 0
-
-  # In some cases when submitting DeclareFaultsRecovered messages,
-  # there may be too many recoveries to fit in a BlockGasLimit.
-  # In those cases it may be necessary to set this value to something low (eg 1);
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
-  # resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # type: int
-  #MaxPartitionsPerRecoveryMessage = 0
-
-  # Enable single partition per PoSt Message for partitions containing recovery sectors
-  #
-  # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
-  # too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
-  # with recovering sectors in the post message
-  #
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent,
-  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # type: bool
-  #SingleRecoveringPartitionPerPostMessage = false
-
diff --git a/cmd/lotus-provider/config.go b/cmd/lotus-provider/config.go
index 295816e1b..9ec5cb52b 100644
--- a/cmd/lotus-provider/config.go
+++ b/cmd/lotus-provider/config.go
@@ -5,6 +5,7 @@ import (
 	"database/sql"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"strings"
 
@@ -55,22 +56,40 @@ var configDefaultCmd = &cli.Command{
 var configSetCmd = &cli.Command{
 	Name:      "set",
 	Aliases:   []string{"add"},
-	Usage:     "Set a config layer or the base",
-	ArgsUsage: "a layer's name",
+	Usage:     "Set a config layer or the base by providing a filename or stdin.",
+	ArgsUsage: "a layer's file name",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "title",
+			Usage: "title of the config layer (req'd for stdin)",
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		args := cctx.Args()
-		if args.Len() != 1 {
-			return errors.New("must have exactly 1 arg for the file name")
-		}
+
 		db, err := makeDB(cctx)
 		if err != nil {
 			return err
 		}
 
-		fn := args.First()
-		bytes, err := os.ReadFile(fn)
+		name := cctx.String("title")
+		var stream io.Reader = os.Stdin
+		if args.Len() != 1 {
+			if cctx.String("title") == "" {
+				return errors.New("must have a title for stdin, or a file name")
+			}
+		} else {
+			stream, err = os.Open(args.First())
+			if err != nil {
+				return fmt.Errorf("cannot open file %s: %w", args.First(), err)
+			}
+			if name == "" {
+				name = strings.Split(args.First(), ".")[0]
+			}
+		}
+		bytes, err := io.ReadAll(stream)
 		if err != nil {
-			return fmt.Errorf("cannot read file %w", err)
+			return fmt.Errorf("cannot read stream/file: %w", err)
 		}
 		lp := config.DefaultLotusProvider() // ensure it's toml
@@ -80,7 +99,6 @@ var configSetCmd = &cli.Command{
 		}
 
 		_ = lp
-		name := strings.Split(fn, ".")[0]
 		_, err = db.Exec(context.Background(),
 			`INSERT INTO harmony_config (title, config) VALUES ($1, $2)
 			ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, string(bytes))
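
The arg-or-stdin handling above is the heart of the new `config set`: read a layer from a named file, or from stdin when --title is supplied, then upsert it into harmony_config. A minimal standalone sketch of the same pattern follows; the name layerFromArgs is illustrative only, not part of this PR:

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    	"os"
    	"strings"
    )

    // layerFromArgs (hypothetical) returns a layer's title and raw TOML bytes,
    // reading from fileArg when given and from stdin otherwise.
    func layerFromArgs(fileArg, title string) (string, []byte, error) {
    	var stream io.Reader = os.Stdin
    	if fileArg == "" {
    		if title == "" {
    			return "", nil, errors.New("must have a title for stdin, or a file name")
    		}
    	} else {
    		f, err := os.Open(fileArg)
    		if err != nil {
    			return "", nil, fmt.Errorf("cannot open file %s: %w", fileArg, err)
    		}
    		defer f.Close() // release the handle eagerly
    		stream = f
    		if title == "" {
    			title = strings.Split(fileArg, ".")[0] // "wdpost.toml" -> "wdpost"
    		}
    	}
    	b, err := io.ReadAll(stream)
    	if err != nil {
    		return "", nil, fmt.Errorf("cannot read stream/file: %w", err)
    	}
    	return title, b, nil
    }

    func main() {
    	// With no file argument, the title is required and stdin is consumed.
    	title, b, err := layerFromArgs("", "wdpost")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	fmt.Printf("layer %q: %d bytes\n", title, len(b))
    }

The sketch closes the opened file explicitly; in the command above the handle lives only until the process exits, so the difference there is cosmetic.
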
@@ -217,5 +235,8 @@ func getConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig, error) {
 		}
 	}
 	_ = have // FUTURE: verify that required fields are here.
+	// If config includes 3rd-party config, consider JSONSchema as a way that
+	// 3rd-parties can dynamically include config requirements and we can
+	// validate the config. Because of layering, we must validate @ startup.
 	return lp, nil
 }
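
The layering the comment refers to is easiest to see with a toy decoder loop: decode the base TOML document into a struct, then decode each layer into the same struct so later layers override only the fields they mention, validating each document as it is applied. This is a sketch of the idea only, not lotus-provider's actual loader; the Config struct and the use of github.com/BurntSushi/toml are assumptions for illustration:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    type Subsystems struct {
    	EnableWindowPost  bool
    	EnableWinningPost bool
    }

    type Config struct {
    	Subsystems Subsystems
    }

    func main() {
    	base := `[Subsystems]
    EnableWindowPost = false
    EnableWinningPost = false`
    	layer := `[Subsystems]
    EnableWindowPost = true` // a layer overrides only the fields it sets

    	var cfg Config
    	for _, doc := range []string{base, layer} {
    		// Decoding into the same struct applies the layer on top of
    		// what is already set; a parse error fails the whole startup.
    		if _, err := toml.Decode(doc, &cfg); err != nil {
    			panic(err)
    		}
    	}
    	fmt.Printf("%+v\n", cfg) // {Subsystems:{EnableWindowPost:true EnableWinningPost:false}}
    }
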
diff --git a/orjanCheckThisIn.toml b/orjanCheckThisIn.toml
deleted file mode 100644
index c9c0c9e78..000000000
--- a/orjanCheckThisIn.toml
+++ /dev/null
@@ -1,190 +0,0 @@
-[Subsystems]
-  # type: bool
-  #EnableWindowPost = false
-
-  # type: bool
-  #EnableWinningPost = false
-
-
-[Fees]
-  # type: types.FIL
-  #DefaultMaxFee = "0.07 FIL"
-
-  # type: types.FIL
-  #MaxPreCommitGasFee = "0.025 FIL"
-
-  # type: types.FIL
-  #MaxCommitGasFee = "0.05 FIL"
-
-  # type: types.FIL
-  #MaxTerminateGasFee = "0.5 FIL"
-
-  # WindowPoSt is a high-value operation, so the default fee should be high.
-  #
-  # type: types.FIL
-  #MaxWindowPoStGasFee = "5 FIL"
-
-  # type: types.FIL
-  #MaxPublishDealsFee = "0.05 FIL"
-
-  [Fees.MaxPreCommitBatchGasFee]
-    # type: types.FIL
-    #Base = "0 FIL"
-
-    # type: types.FIL
-    #PerSector = "0.02 FIL"
-
-  [Fees.MaxCommitBatchGasFee]
-    # type: types.FIL
-    #Base = "0 FIL"
-
-    # type: types.FIL
-    #PerSector = "0.03 FIL"
-
-
-[Addresses]
-  # Addresses to send PreCommit messages from
-  #
-  # type: []string
-  #PreCommitControl = []
-
-  # Addresses to send Commit messages from
-  #
-  # type: []string
-  #CommitControl = []
-
-  # type: []string
-  #TerminateControl = []
-
-  # DisableOwnerFallback disables usage of the owner address for messages
-  # sent automatically
-  #
-  # type: bool
-  #DisableOwnerFallback = false
-
-  # DisableWorkerFallback disables usage of the worker address for messages
-  # sent automatically, if control addresses are configured.
-  # A control address that doesn't have enough funds will still be chosen
-  # over the worker address if this flag is set.
-  #
-  # type: bool
-  #DisableWorkerFallback = false
-
-
-[Proving]
-  # Maximum number of sector checks to run in parallel. (0 = unlimited)
-  #
-  # WARNING: Setting this value too high may make the node crash by running out of stack
-  # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
-  # to late submission.
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: int
-  #ParallelCheckLimit = 32
-
-  # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
-  #
-  # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
-  # test challenge took longer than this timeout
-  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
-  # blocked (e.g. in case of disconnected NFS mount)
-  #
-  # type: Duration
-  #SingleCheckTimeout = "10m0s"
-
-  # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
-  # the partition which didn't get checked on time will be skipped
-  #
-  # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
-  # test challenge took longer than this timeout
-  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
-  # blocked or slow
-  #
-  # type: Duration
-  #PartitionCheckTimeout = "20m0s"
-
-  # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
-  #
-  # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
-  # to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: bool
-  #DisableBuiltinWindowPoSt = false
-
-  # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
-  #
-  # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
-  # Before enabling this option, make sure your PoSt workers work correctly.
-  #
-  # type: bool
-  #DisableBuiltinWinningPoSt = false
-
-  # Disable WindowPoSt provable sector readability checks.
-  #
-  # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
-  # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
-  # we're only interested in checking that sector data can be read.
-  #
-  # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
-  # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
-  # the builtin logic not skipping snark computation when some sectors need to be skipped.
-  #
-  # When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
-  # if challenges for some sectors aren't readable, those sectors will just get skipped.
-  #
-  # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
-  # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
-  # be negligible.
-  #
-  # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
-  #
-  # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
-  # sent to the chain
-  #
-  # After changing this option, confirm that the new value works in your setup by invoking
-  # 'lotus-miner proving compute window-post 0'
-  #
-  # type: bool
-  #DisableWDPoStPreChecks = false
-
-  # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16)
-  #
-  # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
-  #
-  # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-  # means that a single message can prove at most 10 partitions
-  #
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent,
-  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # Setting this value above the network limit has no effect
-  #
-  # type: int
-  #MaxPartitionsPerPoStMessage = 0
-
-  # In some cases when submitting DeclareFaultsRecovered messages,
-  # there may be too many recoveries to fit in a BlockGasLimit.
-  # In those cases it may be necessary to set this value to something low (eg 1);
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
-  # resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # type: int
-  #MaxPartitionsPerRecoveryMessage = 0
-
-  # Enable single partition per PoSt Message for partitions containing recovery sectors
-  #
-  # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
-  # too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
-  # with recovering sectors in the post message
-  #
-  # Note that setting this value lower may result in less efficient gas use - more messages will be sent,
-  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
-  #
-  # type: bool
-  #SingleRecoveringPartitionPerPostMessage = false
-
diff --git a/storage/sealer/ffiwrapper/sealer_test.go b/storage/sealer/ffiwrapper/sealer_test.go
index 72de77872..73b2ad52f 100644
--- a/storage/sealer/ffiwrapper/sealer_test.go
+++ b/storage/sealer/ffiwrapper/sealer_test.go
@@ -3,10 +3,11 @@ package ffiwrapper
 import (
 	"bytes"
 	"context"
-	"crypto/rand"
+	crand "crypto/rand"
 	"fmt"
 	"io"
 	"io/fs"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -52,8 +53,8 @@ type seal struct {
 
 func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
 	return io.MultiReader(
-		io.LimitReader(rand.Reader, int64(123)),
-		io.LimitReader(rand.Reader, int64(dlen-123)),
+		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
+		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
 	)
 }
 
@@ -790,13 +791,15 @@ func TestAddPiece512M(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
+	r := rand.New(rand.NewSource(0x7e5))
+
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(rand.Reader, int64(sz)))
+	}, nil, sz, io.LimitReader(r, int64(sz)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -874,13 +877,15 @@ func TestAddPiece512MPadded(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
+	r := rand.New(rand.NewSource(0x7e5))
+
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(rand.Reader, int64(sz/4)))
+	}, nil, sz, io.LimitReader(r, int64(sz/4)))
 	if err != nil {
 		t.Fatalf("add piece failed: %s", err)
 	}
@@ -967,8 +972,7 @@ func TestMulticoreSDR(t *testing.T) {
 
 func TestPoStChallengeAssumptions(t *testing.T) {
 	var r [32]byte
-	_, err := rand.Read(r[:])
-	if err != nil {
+	if _, err := crand.Read(r[:]); err != nil {
 		panic(err)
 	}
 	r[31] &= 0x3f
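
The test changes above swap crypto/rand for seeded math/rand so that test inputs are reproducible across runs: *rand.Rand implements io.Reader, and a fixed seed always yields the same byte stream. The standalone demonstration below (not part of the PR) shows that property; it also shows why data() above emits its first 123 bytes twice, since both of its sub-readers are built from the same seed 42+sn:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"math/rand"
    )

    func main() {
    	read := func(seed int64, n int) []byte {
    		buf := make([]byte, n)
    		// rand.New(rand.NewSource(seed)) is a deterministic io.Reader.
    		if _, err := io.ReadFull(rand.New(rand.NewSource(seed)), buf); err != nil {
    			panic(err)
    		}
    		return buf
    	}

    	a := read(42, 123)
    	b := read(42, 123)
    	// true: same seed, same stream - so data()'s second sub-reader
    	// replays the first sub-reader's 123 bytes before continuing.
    	fmt.Println(bytes.Equal(a, b))
    }
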
@@ -1050,9 +1054,10 @@ func TestDCAPCloses(t *testing.T) {
 	t.Cleanup(cleanup)
 
 	t.Run("DataCid", func(t *testing.T) {
+		r := rand.New(rand.NewSource(0x7e5))
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(rand.Reader, int64(sz)),
+			Reader: io.LimitReader(r, int64(sz)),
 		}
 
 		c, err := sb.DataCid(context.TODO(), sz, clr)
@@ -1065,9 +1070,10 @@ func TestDCAPCloses(t *testing.T) {
 	})
 
 	t.Run("AddPiece", func(t *testing.T) {
+		r := rand.New(rand.NewSource(0x7e5))
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(rand.Reader, int64(sz)),
+			Reader: io.LimitReader(r, int64(sz)),
 		}
 
 		c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{