diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go
index 2d9942464..0880f12aa 100644
--- a/chain/gen/genesis/miners.go
+++ b/chain/gen/genesis/miners.go
@@ -247,7 +247,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
 		}
 
 		params := &markettypes.PublishStorageDealsParams{}
-		for _, preseal := range m.Sectors {
+		for _, presealTmp := range m.Sectors {
+			preseal := presealTmp
 			preseal.Deal.VerifiedDeal = true
 			preseal.Deal.EndEpoch = minerInfos[i].presealExp
 			p := markettypes.ClientDealProposal{
diff --git a/chain/vectors/gen/main.go b/chain/vectors/gen/main.go
index 658a41dc9..f4b7c82da 100644
--- a/chain/vectors/gen/main.go
+++ b/chain/vectors/gen/main.go
@@ -146,7 +146,10 @@ func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector {
 		}
 
 		params := make([]byte, 32)
-		crand.Read(params)
+		_, err = crand.Read(params)
+		if err != nil {
+			panic(err)
+		}
 
 		msg := &types.Message{
 			To: to,
diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go
index fc484c4e3..3b6f7ddae 100644
--- a/cmd/lotus-bench/main.go
+++ b/cmd/lotus-bench/main.go
@@ -3,10 +3,10 @@ package main
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"encoding/json"
 	"fmt"
 	"math/big"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"sync"
@@ -546,7 +546,10 @@ var sealBenchCmd = &cli.Command{
 		}
 
 		var challenge [32]byte
-		rand.Read(challenge[:])
+		_, err = rand.Read(challenge[:])
+		if err != nil {
+			return err
+		}
 
 		beforePost := time.Now()
@@ -776,9 +779,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 			start := time.Now()
 			log.Infof("[%d] Writing piece into sector...", i)
 
-			r := rand.New(rand.NewSource(100 + int64(i)))
-
-			pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
+			pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), rand.Reader)
 			if err != nil {
 				return nil, nil, err
 			}
diff --git a/cmd/lotus-provider/config.go b/cmd/lotus-provider/config.go
index a6940ddaf..b9d8a93e6 100644
--- a/cmd/lotus-provider/config.go
+++ b/cmd/lotus-provider/config.go
@@ -25,6 +25,7 @@ var configCmd = &cli.Command{
 		configGetCmd,
 		configListCmd,
 		configViewCmd,
+		configRmCmd,
 	},
 }
 
@@ -138,6 +139,30 @@ var configListCmd = &cli.Command{
 	},
 }
 
+var configRmCmd = &cli.Command{
+	Name:  "rm",
+	Usage: "Remove a named config layer.",
+	Flags: []cli.Flag{},
+	Action: func(cctx *cli.Context) error {
+		args := cctx.Args()
+		if args.Len() != 1 {
+			return errors.New("must have exactly 1 arg for the layer name")
+		}
+		db, err := makeDB(cctx)
+		if err != nil {
+			return err
+		}
+		ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First())
+		if err != nil {
+			return fmt.Errorf("unable to delete from db: %w", err)
+		}
+		if ct == 0 {
+			return fmt.Errorf("no layer named %s", args.First())
+		}
+
+		return nil
+	},
+}
 var configViewCmd = &cli.Command{
 	Name:  "view",
 	Usage: "View stacked config layers as it will be interpreted by this version of lotus-provider.",
diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go
index f75a9910d..4ce4afae1 100644
--- a/cmd/lotus-sim/simulation/stages/funding_stage.go
+++ b/cmd/lotus-sim/simulation/stages/funding_stage.go
@@ -166,7 +166,8 @@ func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.Block
 		)
 	}()
 
-	for _, actor := range targets {
+	for _, actorTmp := range targets {
+		actor := actorTmp
 		switch {
 		case builtin.IsAccountActor(actor.Code):
 			if _, err := bb.PushMessage(&types.Message{
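The presealTmp/actorTmp copies above (and the #nosec G601 suppressions further down) address gosec's G601 implicit-memory-aliasing warning: with Go 1.21 and earlier, a range loop reuses a single loop variable, so taking its address or capturing it retains a reference that every iteration overwrites. A minimal standalone sketch of the difference, using an illustrative item type rather than the lotus types:

    package main

    import "fmt"

    type item struct{ n int }

    func main() {
        items := []item{{1}, {2}, {3}}

        var aliased []*item
        for _, it := range items {
            // On Go 1.21 and earlier, `it` is one variable reused each iteration,
            // so every stored pointer ends up pointing at the final element's value.
            aliased = append(aliased, &it)
        }
        fmt.Println(aliased[0].n, aliased[1].n, aliased[2].n) // 3 3 3 on Go <= 1.21

        var copied []*item
        for _, itTmp := range items {
            it := itTmp // per-iteration copy, same pattern as presealTmp/actorTmp above
            copied = append(copied, &it)
        }
        fmt.Println(copied[0].n, copied[1].n, copied[2].n) // 1 2 3
    }

Go 1.22 makes the loop variable per-iteration, so both loops print 1 2 3 there; the explicit copy keeps behaviour correct on older toolchains and satisfies the linter either way.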
diff --git a/lib/harmony/harmonydb/harmonydb.go b/lib/harmony/harmonydb/harmonydb.go
index 793841931..279fa2ec8 100644
--- a/lib/harmony/harmonydb/harmonydb.go
+++ b/lib/harmony/harmonydb/harmonydb.go
@@ -169,8 +169,11 @@ func (db *DB) addStatsAndConnect() error {
 		return nil
 	}
 
+	// Timeout the first connection so we know if the DB is down.
+	ctx, ctxClose := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
+	defer ctxClose()
 	var err error
-	db.pgx, err = pgxpool.NewWithConfig(context.Background(), db.cfg)
+	db.pgx, err = pgxpool.NewWithConfig(ctx, db.cfg)
 	if err != nil {
 		logger.Error(fmt.Sprintf("Unable to connect to database: %v\n", err))
 		return err
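The harmonydb hunk above only bounds the initial pgxpool.NewWithConfig call with a five-second deadline. Below is a minimal sketch of the same idea, assuming the jackc/pgx v5 pgxpool package and a placeholder DSN: context.WithTimeout is equivalent to the WithDeadline(time.Now().Add(...)) form, and an explicit Ping forces a round trip so an unreachable database fails fast instead of surfacing on the first query.

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/jackc/pgx/v5/pgxpool"
    )

    func connect(connString string) (*pgxpool.Pool, error) {
        // Bound pool creation plus the verification ping to five seconds.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        pool, err := pgxpool.New(ctx, connString)
        if err != nil {
            return nil, err
        }
        // Ping dials the server, so a down database is reported here rather
        // than on the first real query.
        if err := pool.Ping(ctx); err != nil {
            pool.Close()
            return nil, err
        }
        return pool, nil
    }

    func main() {
        pool, err := connect("postgres://user:pass@localhost:5432/harmony") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer pool.Close()
    }

Depending on pool configuration, constructing the pool may not dial the server at all, which is why the Ping is the step that reliably exercises the timeout.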
diff --git a/orjanCheckThisIn.toml b/orjanCheckThisIn.toml
new file mode 100644
index 000000000..c9c0c9e78
--- /dev/null
+++ b/orjanCheckThisIn.toml
@@ -0,0 +1,190 @@
+[Subsystems]
+  # type: bool
+  #EnableWindowPost = false
+
+  # type: bool
+  #EnableWinningPost = false
+
+
+[Fees]
+  # type: types.FIL
+  #DefaultMaxFee = "0.07 FIL"
+
+  # type: types.FIL
+  #MaxPreCommitGasFee = "0.025 FIL"
+
+  # type: types.FIL
+  #MaxCommitGasFee = "0.05 FIL"
+
+  # type: types.FIL
+  #MaxTerminateGasFee = "0.5 FIL"
+
+  # WindowPoSt is a high-value operation, so the default fee should be high.
+  #
+  # type: types.FIL
+  #MaxWindowPoStGasFee = "5 FIL"
+
+  # type: types.FIL
+  #MaxPublishDealsFee = "0.05 FIL"
+
+  [Fees.MaxPreCommitBatchGasFee]
+    # type: types.FIL
+    #Base = "0 FIL"
+
+    # type: types.FIL
+    #PerSector = "0.02 FIL"
+
+  [Fees.MaxCommitBatchGasFee]
+    # type: types.FIL
+    #Base = "0 FIL"
+
+    # type: types.FIL
+    #PerSector = "0.03 FIL"
+
+
+[Addresses]
+  # Addresses to send PreCommit messages from
+  #
+  # type: []string
+  #PreCommitControl = []
+
+  # Addresses to send Commit messages from
+  #
+  # type: []string
+  #CommitControl = []
+
+  # type: []string
+  #TerminateControl = []
+
+  # DisableOwnerFallback disables usage of the owner address for messages
+  # sent automatically
+  #
+  # type: bool
+  #DisableOwnerFallback = false
+
+  # DisableWorkerFallback disables usage of the worker address for messages
+  # sent automatically, if control addresses are configured.
+  # A control address that doesn't have enough funds will still be chosen
+  # over the worker address if this flag is set.
+  #
+  # type: bool
+  #DisableWorkerFallback = false
+
+
+[Proving]
+  # Maximum number of sector checks to run in parallel. (0 = unlimited)
+  #
+  # WARNING: Setting this value too high may make the node crash by running out of stack
+  # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
+  # to late submission.
+  #
+  # After changing this option, confirm that the new value works in your setup by invoking
+  # 'lotus-miner proving compute window-post 0'
+  #
+  # type: int
+  #ParallelCheckLimit = 32
+
+  # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
+  #
+  # WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
+  # test challenge took longer than this timeout
+  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
+  # blocked (e.g. in case of disconnected NFS mount)
+  #
+  # type: Duration
+  #SingleCheckTimeout = "10m0s"
+
+  # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
+  # the partition which didn't get checked on time will be skipped
+  #
+  # WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
+  # test challenge took longer than this timeout
+  # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
+  # blocked or slow
+  #
+  # type: Duration
+  #PartitionCheckTimeout = "20m0s"
+
+  # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
+  #
+  # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
+  # to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
+  #
+  # After changing this option, confirm that the new value works in your setup by invoking
+  # 'lotus-miner proving compute window-post 0'
+  #
+  # type: bool
+  #DisableBuiltinWindowPoSt = false
+
+  # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
+  #
+  # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
+  # Before enabling this option, make sure your PoSt workers work correctly.
+  #
+  # type: bool
+  #DisableBuiltinWinningPoSt = false
+
+  # Disable WindowPoSt provable sector readability checks.
+  #
+  # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
+  # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
+  # we're only interested in checking that sector data can be read.
+  #
+  # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
+  # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
+  # the builtin logic not skipping snark computation when some sectors need to be skipped.
+  #
+  # When using PoSt workers, this process is mostly redundant: with PoSt workers, challenges will be read once, and
+  # if challenges for some sectors aren't readable, those sectors will just get skipped.
+  #
+  # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
+  # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
+  # be negligible.
+  #
+  # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
+  #
+  # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
+  # sent to the chain
+  #
+  # After changing this option, confirm that the new value works in your setup by invoking
+  # 'lotus-miner proving compute window-post 0'
+  #
+  # type: bool
+  #DisableWDPoStPreChecks = false
+
+  # Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
+  #
+  # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
+  #
+  # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
+  # means that a single message can prove at most 10 partitions
+  #
+  # Note that setting this value lower may result in less efficient gas use - more messages will be sent
+  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+  #
+  # Setting this value above the network limit has no effect
+  #
+  # type: int
+  #MaxPartitionsPerPoStMessage = 0
+
+  # In some cases when submitting DeclareFaultsRecovered messages,
+  # there may be too many recoveries to fit in a BlockGasLimit.
+  # In those cases it may be necessary to set this value to something low (e.g. 1);
+  # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
+  # resulting in more total gas use (but each message will have lower gas limit)
+  #
+  # type: int
+  #MaxPartitionsPerRecoveryMessage = 0
+
+  # Enable single partition per PoSt Message for partitions containing recovery sectors
+  #
+  # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+  # too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+  # with recovering sectors in the post message
+  #
+  # Note that setting this value lower may result in less efficient gas use - more messages will be sent
+  # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+  #
+  # type: bool
+  #SingleRecoveringPartitionPerPostMessage = false
+
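MaxPartitionsPerPoStMessage and MaxPartitionsPerRecoveryMessage both cap how many partitions are packed into a single message, trading message count against per-message gas. The sketch below is not lotus code — batchPartitions and the numbers in main are illustrative — but it shows the documented semantics: 0 falls back to the network limit, and values above the limit have no effect.

    package main

    import "fmt"

    // batchPartitions splits a deadline's partition indices into per-message batches.
    // maxPer == 0 means "use the network limit"; values above the limit are clamped.
    func batchPartitions(partitions []uint64, maxPer, networkLimit int) [][]uint64 {
        limit := maxPer
        if limit == 0 || limit > networkLimit {
            limit = networkLimit
        }
        var batches [][]uint64
        for len(partitions) > 0 {
            n := limit
            if n > len(partitions) {
                n = len(partitions)
            }
            batches = append(batches, partitions[:n])
            partitions = partitions[n:]
        }
        return batches
    }

    func main() {
        parts := []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
        // A cap of 5 over 12 partitions yields three messages: 5 + 5 + 2 partitions.
        for _, batch := range batchPartitions(parts, 5, 10) {
            fmt.Println(batch)
        }
    }

A lower cap means more, smaller messages for the same deadline, which is exactly the gas trade-off the comments above describe.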
diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go
index fb4a6e42e..c8087875e 100644
--- a/storage/sealer/ffiwrapper/sealer_cgo.go
+++ b/storage/sealer/ffiwrapper/sealer_cgo.go
@@ -147,7 +147,8 @@ func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize,
 		})
 	}
 
-	if len(piecePromises) > 0 && len(piecePromises) == 1 { // weird for linter
+	/* #nosec G601 -- length is verified */
+	if len(piecePromises) == 1 {
 		p := piecePromises[0]
 		return p()
 	}
@@ -348,7 +349,8 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, exis
 	}
 	stagedFile = nil
 
-	if len(piecePromises) > 0 && len(piecePromises) == 1 { // weird for linter
+	/* #nosec G601 -- length is verified */
+	if len(piecePromises) == 1 {
 		p := piecePromises[0]
 		return p()
 	}
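The two sealer_cgo.go hunks replace a condition that had been contorted for the linter — len(piecePromises) > 0 && len(piecePromises) == 1 — with the equivalent len(piecePromises) == 1, keeping the gosec suppression as a comment. A stripped-down sketch of the surrounding pattern, with an illustrative promise type rather than the sealer's real piece-CID promises: deferred work is collected as funcs, a single promise is returned directly, and multiple promises are resolved and combined.

    package main

    import "fmt"

    // promise defers a piece of work and yields its result when called.
    type promise func() (string, error)

    // resolve mirrors the short-circuit above: exactly one promise is returned
    // as-is (the len > 0 check was redundant); otherwise all are awaited and combined.
    func resolve(promises []promise) (string, error) {
        if len(promises) == 1 {
            return promises[0]()
        }
        var out string
        for _, p := range promises {
            s, err := p()
            if err != nil {
                return "", err
            }
            out += s
        }
        return out, nil
    }

    func main() {
        ps := []promise{
            func() (string, error) { return "piece-0 ", nil },
            func() (string, error) { return "piece-1", nil },
        }
        combined, err := resolve(ps)
        fmt.Println(combined, err) // piece-0 piece-1 <nil>
    }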
diff --git a/storage/sealer/ffiwrapper/sealer_test.go b/storage/sealer/ffiwrapper/sealer_test.go
index 78c0ffb06..72de77872 100644
--- a/storage/sealer/ffiwrapper/sealer_test.go
+++ b/storage/sealer/ffiwrapper/sealer_test.go
@@ -3,10 +3,10 @@ package ffiwrapper
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"fmt"
 	"io"
 	"io/fs"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -52,8 +52,8 @@ type seal struct {
 
 func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
 	return io.MultiReader(
-		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
-		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
+		io.LimitReader(rand.Reader, int64(123)),
+		io.LimitReader(rand.Reader, int64(dlen-123)),
 	)
 }
 
@@ -790,15 +790,13 @@ func TestAddPiece512M(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
-	r := rand.New(rand.NewSource(0x7e5))
-
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(r, int64(sz)))
+	}, nil, sz, io.LimitReader(rand.Reader, int64(sz)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -876,15 +874,13 @@ func TestAddPiece512MPadded(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
-	r := rand.New(rand.NewSource(0x7e5))
-
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(r, int64(sz/4)))
+	}, nil, sz, io.LimitReader(rand.Reader, int64(sz/4)))
 	if err != nil {
 		t.Fatalf("add piece failed: %s", err)
 	}
@@ -971,7 +967,10 @@ func TestMulticoreSDR(t *testing.T) {
 
 func TestPoStChallengeAssumptions(t *testing.T) {
 	var r [32]byte
-	rand.Read(r[:])
+	_, err := rand.Read(r[:])
+	if err != nil {
+		panic(err)
+	}
 	r[31] &= 0x3f
 
 	// behaves like a pure function
@@ -1051,10 +1050,9 @@ func TestDCAPCloses(t *testing.T) {
 	t.Cleanup(cleanup)
 
 	t.Run("DataCid", func(t *testing.T) {
-		r := rand.New(rand.NewSource(0x7e5))
 
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(r, int64(sz)),
+			Reader: io.LimitReader(rand.Reader, int64(sz)),
 		}
 
 		c, err := sb.DataCid(context.TODO(), sz, clr)
@@ -1067,10 +1065,9 @@ func TestDCAPCloses(t *testing.T) {
 	})
 
 	t.Run("AddPiece", func(t *testing.T) {
-		r := rand.New(rand.NewSource(0x7e5))
 
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(r, int64(sz)),
+			Reader: io.LimitReader(rand.Reader, int64(sz)),
 		}
 
 		c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
diff --git a/storage/sealer/piece_provider_test.go b/storage/sealer/piece_provider_test.go
index 4cbc79a93..de1e07a78 100644
--- a/storage/sealer/piece_provider_test.go
+++ b/storage/sealer/piece_provider_test.go
@@ -3,8 +3,8 @@ package sealer
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"io"
-	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -195,7 +195,10 @@ type pieceProviderTestHarness struct {
 
 func generatePieceData(size uint64) []byte {
 	bz := make([]byte, size)
-	rand.Read(bz)
+	_, err := rand.Read(bz)
+	if err != nil {
+		panic(err)
+	}
 	return bz
 }
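The test changes above switch piece data from seeded math/rand sources to crypto/rand.Reader and check the error returned by rand.Read. A small self-contained sketch of both shapes used in the tests — a filled byte slice mirroring generatePieceData, and a size-limited stream like the AddPiece/DataCid inputs; randomPieceData and the sizes are illustrative.

    package main

    import (
        "crypto/rand"
        "fmt"
        "io"
    )

    // randomPieceData fills a buffer from crypto/rand; a read failure is fatal,
    // which is fine for test fixture data.
    func randomPieceData(size int) []byte {
        buf := make([]byte, size)
        if _, err := rand.Read(buf); err != nil {
            panic(err)
        }
        return buf
    }

    func main() {
        // io.LimitReader over rand.Reader is the streaming form used for
        // AddPiece/DataCid inputs in the tests above.
        streamed, err := io.ReadAll(io.LimitReader(rand.Reader, 16))
        if err != nil {
            panic(err)
        }
        fmt.Println(len(randomPieceData(32)), len(streamed)) // 32 16
    }

One trade-off worth noting: the old seeded readers gave reproducible test inputs, while crypto/rand data differs on every run.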