lint, db timeout, cfg rm
commit ea035f4a7f
parent a9d472a40e
@@ -247,7 +247,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
 	}
 
 	params := &markettypes.PublishStorageDealsParams{}
-	for _, preseal := range m.Sectors {
+	for _, presealTmp := range m.Sectors {
+		preseal := presealTmp
 		preseal.Deal.VerifiedDeal = true
 		preseal.Deal.EndEpoch = minerInfos[i].presealExp
 		p := markettypes.ClientDealProposal{
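The presealTmp/preseal dance above is the standard fix for Go's range-variable aliasing lint (gosec G601): before Go 1.22 the loop variable is a single variable reused across iterations, so retaining a pointer into it aliases memory that the next iteration overwrites. A minimal sketch of the pitfall, with a hypothetical Item type:

```go
package main

import "fmt"

type Item struct{ N int }

func main() {
	items := []Item{{1}, {2}, {3}}

	var ptrs []*int
	for _, it := range items {
		it := it // per-iteration copy, mirroring presealTmp -> preseal in the commit
		ptrs = append(ptrs, &it.N)
	}
	for _, p := range ptrs {
		fmt.Println(*p) // prints 1 2 3; without the copy, pre-Go 1.22 prints 3 3 3
	}
}
```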
@@ -146,7 +146,10 @@ func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector {
 	}
 
 	params := make([]byte, 32)
-	crand.Read(params)
+	_, err = crand.Read(params)
+	if err != nil {
+		panic(err)
+	}
 
 	msg := &types.Message{
 		To: to,
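This and the similar hunks below are the lint half of the commit: crypto/rand's Read returns (n int, err error), and errcheck-style linters flag the dropped error. Since there is no sane fallback when the entropy source fails, vector-generation code panics. A self-contained sketch of the pattern, with a hypothetical mustRandom helper:

```go
package main

import (
	crand "crypto/rand"
	"fmt"
)

func mustRandom(n int) []byte {
	buf := make([]byte, n)
	// Ignoring the error would hide a failed entropy source.
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	return buf
}

func main() {
	fmt.Printf("%x\n", mustRandom(32))
}
```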
@@ -3,10 +3,10 @@ package main
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"encoding/json"
 	"fmt"
 	"math/big"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"sync"
@@ -546,7 +546,10 @@ var sealBenchCmd = &cli.Command{
 		}
 
 		var challenge [32]byte
-		rand.Read(challenge[:])
+		_, err = rand.Read(challenge[:])
+		if err != nil {
+			return err
+		}
 
 		beforePost := time.Now()
 
@@ -776,9 +779,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 		start := time.Now()
 		log.Infof("[%d] Writing piece into sector...", i)
 
-		r := rand.New(rand.NewSource(100 + int64(i)))
-
-		pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
+		pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), rand.Reader)
 		if err != nil {
 			return nil, nil, err
 		}
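With math/rand gone from this file's imports, rand now resolves to crypto/rand, so the seeded per-sector reader is replaced by rand.Reader. Note this trades reproducibility for simplicity: the old rand.New(rand.NewSource(100 + int64(i))) fed identical piece bytes into every benchmark run, while crypto/rand.Reader does not. A sketch contrasting the two forms (pieceReader is an illustrative name; stdlib only):

```go
package main

import (
	crand "crypto/rand"
	"fmt"
	"io"
	mrand "math/rand"
)

// pieceReader returns the benchmark's piece-data source: the pre-commit
// deterministic PRNG, or the post-commit crypto/rand stream.
func pieceReader(i int64, deterministic bool) io.Reader {
	if deterministic {
		return mrand.New(mrand.NewSource(100 + i)) // same bytes for sector i on every run
	}
	return crand.Reader // fresh random bytes on every run
}

func main() {
	buf := make([]byte, 8)
	if _, err := io.ReadFull(pieceReader(0, false), buf); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf)
}
```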
@@ -25,6 +25,7 @@ var configCmd = &cli.Command{
 		configGetCmd,
 		configListCmd,
 		configViewCmd,
+		configRmCmd,
 	},
 }
 
@@ -138,6 +139,30 @@ var configListCmd = &cli.Command{
 	},
 }
 
+var configRmCmd = &cli.Command{
+	Name:  "rm",
+	Usage: "Remove a named config layer.",
+	Flags: []cli.Flag{},
+	Action: func(cctx *cli.Context) error {
+		args := cctx.Args()
+		if args.Len() != 1 {
+			return errors.New("must have exactly 1 arg for the layer name")
+		}
+		db, err := makeDB(cctx)
+		if err != nil {
+			return err
+		}
+		ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First())
+		if err != nil {
+			return fmt.Errorf("unable to delete from db: %w", err)
+		}
+		if ct == 0 {
+			return fmt.Errorf("no layer named %s", args.First())
+		}
+
+		return nil
+	},
+}
 var configViewCmd = &cli.Command{
 	Name:  "view",
 	Usage: "View stacked config layers as it will be interpreted by this version of lotus-provider.",
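The rm action distinguishes "layer absent" from "layer removed" via the row count that db.Exec returns. Assuming harmonydb's Exec wraps pgx and reports the number of affected rows (which the ct == 0 check implies), the same logic written directly against pgx v5 would look like this sketch (rmLayer is an illustrative name):

```go
package config

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

// rmLayer deletes one named layer and reports whether it existed,
// mirroring what the commit's configRmCmd does through harmonydb.
func rmLayer(ctx context.Context, db *pgxpool.Pool, name string) error {
	tag, err := db.Exec(ctx, `DELETE FROM harmony_config WHERE title=$1`, name)
	if err != nil {
		return fmt.Errorf("unable to delete from db: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return fmt.Errorf("no layer named %s", name)
	}
	return nil
}
```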
@@ -166,7 +166,8 @@ func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.Block
 		)
 	}()
 
-	for _, actor := range targets {
+	for _, actorTmp := range targets {
+		actor := actorTmp
 		switch {
 		case builtin.IsAccountActor(actor.Code):
 			if _, err := bb.PushMessage(&types.Message{
@@ -169,8 +169,11 @@ func (db *DB) addStatsAndConnect() error {
 		return nil
 	}
 
+	// Timeout the first connection so we know if the DB is down.
+	ctx, ctxClose := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
+	defer ctxClose()
 	var err error
-	db.pgx, err = pgxpool.NewWithConfig(context.Background(), db.cfg)
+	db.pgx, err = pgxpool.NewWithConfig(ctx, db.cfg)
 	if err != nil {
 		logger.Error(fmt.Sprintf("Unable to connect to database: %v\n", err))
 		return err
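The five-second cap is expressed with context.WithDeadline at time.Now().Add(5*time.Second); context.WithTimeout is the stdlib shorthand for exactly that pattern. A minimal sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Shorthand for WithDeadline(context.Background(), time.Now().Add(5*time.Second)):
	// any dial using ctx fails fast instead of hanging when the DB is down.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context.DeadlineExceeded after ~5s
}
```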
orjanCheckThisIn.toml (new file, 190 lines)
@@ -0,0 +1,190 @@
+[Subsystems]
+# type: bool
+#EnableWindowPost = false
+
+# type: bool
+#EnableWinningPost = false
+
+
+[Fees]
+# type: types.FIL
+#DefaultMaxFee = "0.07 FIL"
+
+# type: types.FIL
+#MaxPreCommitGasFee = "0.025 FIL"
+
+# type: types.FIL
+#MaxCommitGasFee = "0.05 FIL"
+
+# type: types.FIL
+#MaxTerminateGasFee = "0.5 FIL"
+
+# WindowPoSt is a high-value operation, so the default fee should be high.
+#
+# type: types.FIL
+#MaxWindowPoStGasFee = "5 FIL"
+
+# type: types.FIL
+#MaxPublishDealsFee = "0.05 FIL"
+
+[Fees.MaxPreCommitBatchGasFee]
+# type: types.FIL
+#Base = "0 FIL"
+
+# type: types.FIL
+#PerSector = "0.02 FIL"
+
+[Fees.MaxCommitBatchGasFee]
+# type: types.FIL
+#Base = "0 FIL"
+
+# type: types.FIL
+#PerSector = "0.03 FIL"
+
+
+[Addresses]
+# Addresses to send PreCommit messages from
+#
+# type: []string
+#PreCommitControl = []
+
+# Addresses to send Commit messages from
+#
+# type: []string
+#CommitControl = []
+
+# type: []string
+#TerminateControl = []
+
+# DisableOwnerFallback disables usage of the owner address for messages
+# sent automatically
+#
+# type: bool
+#DisableOwnerFallback = false
+
+# DisableWorkerFallback disables usage of the worker address for messages
+# sent automatically, if control addresses are configured.
+# A control address that doesn't have enough funds will still be chosen
+# over the worker address if this flag is set.
+#
+# type: bool
+#DisableWorkerFallback = false
+
+
+[Proving]
+# Maximum number of sector checks to run in parallel. (0 = unlimited)
+#
+# WARNING: Setting this value too high may make the node crash by running out of stack
+# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
+# to late submission.
+#
+# After changing this option, confirm that the new value works in your setup by invoking
+# 'lotus-miner proving compute window-post 0'
+#
+# type: int
+#ParallelCheckLimit = 32
+
+# Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
+#
+# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
+# test challenge took longer than this timeout
+# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
+# blocked (e.g. in case of disconnected NFS mount)
+#
+# type: Duration
+#SingleCheckTimeout = "10m0s"
+
+# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
+# the partition which didn't get checked on time will be skipped
+#
+# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just because reading the
+# test challenge took longer than this timeout
+# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
+# blocked or slow
+#
+# type: Duration
+#PartitionCheckTimeout = "20m0s"
+
+# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
+#
+# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
+# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
+#
+# After changing this option, confirm that the new value works in your setup by invoking
+# 'lotus-miner proving compute window-post 0'
+#
+# type: bool
+#DisableBuiltinWindowPoSt = false
+
+# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
+#
+# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
+# Before enabling this option, make sure your PoSt workers work correctly.
+#
+# type: bool
+#DisableBuiltinWinningPoSt = false
+
+# Disable WindowPoSt provable sector readability checks.
+#
+# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
+# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
+# we're only interested in checking that sector data can be read.
+#
+# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
+# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
+# the builtin logic not skipping snark computation when some sectors need to be skipped.
+#
+# When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
+# if challenges for some sectors aren't readable, those sectors will just get skipped.
+#
+# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
+# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
+# be negligible.
+#
+# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
+#
+# NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
+# sent to the chain
+#
+# After changing this option, confirm that the new value works in your setup by invoking
+# 'lotus-miner proving compute window-post 0'
+#
+# type: bool
+#DisableWDPoStPreChecks = false
+
+# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
+#
+# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
+#
+# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
+# means that a single message can prove at most 10 partitions
+#
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+#
+# Setting this value above the network limit has no effect
+#
+# type: int
+#MaxPartitionsPerPoStMessage = 0
+
+# In some cases when submitting DeclareFaultsRecovered messages,
+# there may be too many recoveries to fit in a BlockGasLimit.
+# In those cases it may be necessary to set this value to something low (e.g. 1);
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
+# resulting in more total gas use (but each message will have lower gas limit)
+#
+# type: int
+#MaxPartitionsPerRecoveryMessage = 0
+
+# Enable single partition per PoSt Message for partitions containing recovery sectors
+#
+# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+# with recovering sectors in the post message
+#
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+#
+# type: bool
+#SingleRecoveringPartitionPerPostMessage = false
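One note on the `# type: Duration` fields above: values such as "10m0s" use Go's duration string syntax, so a value can be sanity-checked with the standard library before it goes into the config:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "10m0s" is the time.Duration string form used by the
	// SingleCheckTimeout / PartitionCheckTimeout defaults above.
	d, err := time.ParseDuration("10m0s")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Minutes()) // 10
}
```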
@@ -147,7 +147,8 @@ func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize,
 		})
 	}
 
-	if len(piecePromises) > 0 && len(piecePromises) == 1 { // weird for linter
+	/* #nosec G601 -- length is verified */
+	if len(piecePromises) == 1 {
 		p := piecePromises[0]
 		return p()
 	}
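The removed `len(piecePromises) > 0 &&` clause existed only to appease the linter; the commit replaces it with an explicit, scoped suppression. gosec reads #nosec directives from ordinary comments, optionally restricted to a rule ID and followed by a justification, which documents the exception instead of contorting the condition. A generic sketch (firstOrNil is an illustrative name):

```go
package main

import "fmt"

func firstOrNil(xs []func() error) func() error {
	// gosec directives are plain comments: "#nosec <RuleID> -- <justification>".
	// Scoping to one rule (here G601, as in the commit) keeps other checks active.
	/* #nosec G601 -- length is verified */
	if len(xs) == 1 {
		return xs[0]
	}
	return nil
}

func main() {
	fmt.Println(firstOrNil(nil) == nil) // true
}
```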
@@ -348,7 +349,8 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, exis
 	}
 	stagedFile = nil
 
-	if len(piecePromises) > 0 && len(piecePromises) == 1 { // weird for linter
+	/* #nosec G601 -- length is verified */
+	if len(piecePromises) == 1 {
 		p := piecePromises[0]
 		return p()
 	}
@@ -3,10 +3,10 @@ package ffiwrapper
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"fmt"
 	"io"
 	"io/fs"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"runtime"
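All of the import swaps in this commit rely on crypto/rand and math/rand sharing the package name rand: the file keeps compiling as long as it only uses names that exist in the new package (rand.Reader, rand.Read), while rand.New and rand.NewSource exist only in math/rand. When one file genuinely needs both, import aliases keep them apart:

```go
package main

import (
	crand "crypto/rand" // entropy source: crand.Reader, crand.Read
	"fmt"
	mrand "math/rand" // seeded PRNG: mrand.New, mrand.NewSource
)

func main() {
	var b [8]byte
	if _, err := crand.Read(b[:]); err != nil {
		panic(err)
	}
	r := mrand.New(mrand.NewSource(42))
	fmt.Printf("%x %d\n", b, r.Int63())
}
```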
@@ -52,8 +52,8 @@ type seal struct {
 
 func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
 	return io.MultiReader(
-		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
-		io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
+		io.LimitReader(rand.Reader, int64(123)),
+		io.LimitReader(rand.Reader, int64(dlen-123)),
 	)
 }
 
@@ -790,15 +790,13 @@ func TestAddPiece512M(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
-	r := rand.New(rand.NewSource(0x7e5))
-
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(r, int64(sz)))
+	}, nil, sz, io.LimitReader(rand.Reader, int64(sz)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -876,15 +874,13 @@ func TestAddPiece512MPadded(t *testing.T) {
 	}
 	t.Cleanup(cleanup)
 
-	r := rand.New(rand.NewSource(0x7e5))
-
 	c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
 		ID: abi.SectorID{
 			Miner:  miner,
 			Number: 0,
 		},
 		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
-	}, nil, sz, io.LimitReader(r, int64(sz/4)))
+	}, nil, sz, io.LimitReader(rand.Reader, int64(sz/4)))
 	if err != nil {
 		t.Fatalf("add piece failed: %s", err)
 	}
@@ -971,7 +967,10 @@ func TestMulticoreSDR(t *testing.T) {
 
 func TestPoStChallengeAssumptions(t *testing.T) {
 	var r [32]byte
-	rand.Read(r[:])
+	_, err := rand.Read(r[:])
+	if err != nil {
+		panic(err)
+	}
 	r[31] &= 0x3f
 
 	// behaves like a pure function
@@ -1051,10 +1050,9 @@ func TestDCAPCloses(t *testing.T) {
 	t.Cleanup(cleanup)
 
 	t.Run("DataCid", func(t *testing.T) {
-		r := rand.New(rand.NewSource(0x7e5))
 
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(r, int64(sz)),
+			Reader: io.LimitReader(rand.Reader, int64(sz)),
 		}
 
 		c, err := sb.DataCid(context.TODO(), sz, clr)
@@ -1067,10 +1065,9 @@ func TestDCAPCloses(t *testing.T) {
 	})
 
 	t.Run("AddPiece", func(t *testing.T) {
-		r := rand.New(rand.NewSource(0x7e5))
 
 		clr := &closeAssertReader{
-			Reader: io.LimitReader(r, int64(sz)),
+			Reader: io.LimitReader(rand.Reader, int64(sz)),
 		}
 
 		c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
@@ -3,8 +3,8 @@ package sealer
 import (
 	"bytes"
 	"context"
+	"crypto/rand"
 	"io"
-	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -195,7 +195,10 @@ type pieceProviderTestHarness struct {
 
 func generatePieceData(size uint64) []byte {
	bz := make([]byte, size)
-	rand.Read(bz)
+	_, err := rand.Read(bz)
+	if err != nil {
+		panic(err)
+	}
 	return bz
 }
 