chore: lint: fix lint errors with new linting config

Ref: https://github.com/filecoin-project/lotus/issues/11967

commit 26d3fd2ecc (parent 6a8110322d)
@@ -109,11 +109,9 @@ func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) er
 func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
 	// both stores are viewable.
-	if err := bs.write.View(ctx, c, callback); ipld.IsNotFound(err) {
-		// not found in write blockstore; fall through.
-	} else {
+	if err := bs.write.View(ctx, c, callback); !ipld.IsNotFound(err) {
 		return err // propagate errors, or nil, i.e. found.
-	}
+	} // else not found in write blockstore; fall through.
 	return bs.read.View(ctx, c, callback)
 }
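Note: this inversion is the commit's most common pattern. revive's empty-block rule flags branches that exist only to hold a comment, and the fix negates the condition and moves the comment to the closing brace. A minimal, self-contained sketch of the transformation (the function names are illustrative, not from the diff):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// lookup shows the rewrite: instead of an empty branch kept only for its
// comment, the condition is negated and the comment rides the closing brace.
func lookup(primary, fallback func() error) error {
	if err := primary(); !errors.Is(err, errNotFound) {
		return err // propagate errors, or nil, i.e. found
	} // else not found in primary; fall through
	return fallback()
}

func main() {
	err := lookup(
		func() error { return errNotFound },
		func() error { return nil },
	)
	fmt.Println(err) // <nil>
}
```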
@@ -282,14 +282,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
 	if ss.checkpointExists() {
 		log.Info("found compaction checkpoint; resuming compaction")
 		if err := ss.completeCompaction(); err != nil {
-			markSetEnv.Close() //nolint:errcheck
+			_ = markSetEnv.Close()
 			return nil, xerrors.Errorf("error resuming compaction: %w", err)
 		}
 	}
 	if ss.pruneCheckpointExists() {
 		log.Info("found prune checkpoint; resuming prune")
 		if err := ss.completePrune(); err != nil {
-			markSetEnv.Close() //nolint:errcheck
+			_ = markSetEnv.Close()
 			return nil, xerrors.Errorf("error resuming prune: %w", err)
 		}
 	}
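Note: the second recurring pattern swaps `//nolint:errcheck` suppressions for an explicit discard. Assigning the result to the blank identifier satisfies errcheck without a directive and makes the deliberate choice visible at the call site. A minimal sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "demo")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Before: f.Close() //nolint:errcheck
	// After: an explicit blank assignment satisfies errcheck and documents
	// that the error is intentionally ignored.
	_ = f.Close()
	_ = os.Remove(f.Name())
}
```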
@@ -109,16 +109,13 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
 	// TODO: ok to use hysteresis with no transitions between 30s and 1m?
 	if time.Since(timestamp) < SyncWaitTime {
 		/* Chain in sync */
-		if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
-			// already in sync, no signaling necessary
-		} else {
+		if !atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
 			// transition from out of sync to in sync
 			s.chainSyncMx.Lock()
 			s.chainSyncFinished = true
 			s.chainSyncCond.Broadcast()
 			s.chainSyncMx.Unlock()
-		}
-
+		} // else already in sync, no signaling necessary
 	}
 	// 2. protect the new tipset(s)
 	s.protectTipSets(apply)
@@ -32,7 +32,7 @@ func init() {
 	CompactionBoundary = 2
 	WarmupBoundary = 0
 	SyncWaitTime = time.Millisecond
-	logging.SetLogLevel("splitstore", "DEBUG")
+	_ = logging.SetLogLevel("splitstore", "DEBUG")
 }
 
 func testSplitStore(t *testing.T, cfg *Config) {
@@ -6,7 +6,7 @@ import (
 	"github.com/ipfs/go-cid"
 )
 
-var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{
+var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
 	Network: "butterflynet",
 	Version: 8,
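Note: this hunk and several later ones (`var baseText string = ...`, `var tmpl *template.Template = ...`, `var panicErrorCodes map[uint64]string = ...`) drop an explicit type that the initializer already determines. A minimal sketch:

```go
package main

import "fmt"

// Before: the type appears on both sides of the declaration.
//   var names []string = []string{"a", "b"}
// After: the initializer alone carries the type.
var names = []string{"a", "b"}

func main() {
	fmt.Println(names)
}
```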
@@ -166,5 +166,5 @@ const BootstrapPeerThreshold = 4
 // As per https://github.com/ethereum-lists/chains
 const Eip155ChainId = 314
 
-// we skip checks on message validity in this block to sidestep the zero-bls signature
+// WhitelistedBlock skips checks on message validity in this block to sidestep the zero-bls signature
 var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
@@ -124,6 +124,7 @@ const MinimumBaseFee = 100
 const PackingEfficiencyNum = 4
 const PackingEfficiencyDenom = 5
 
+// revive:disable-next-line:exported
 // Actor consts
 // TODO: pieceSize unused from actors
 var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)
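Note: where a finding is deliberate, the commit annotates the code rather than changing it, using revive's inline directives: `revive:disable-next-line:<rule>` (as above, for the exported-identifier rule) and `revive:disable-line:<rule>` (used later for empty-block and increment-decrement). A hypothetical sketch:

```go
package main

import "fmt"

// revive:disable-next-line:exported
var Threshold = 4 // exported but intentionally undocumented here

func main() {
	fmt.Println(Threshold)
}
```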
@@ -25,15 +25,15 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
 	}
 
 	hts := syncer.ChainStore().GetHeaviestTipSet()
-	if hts.Equals(ts) {
-		// Current head, no need to switch.
-	} else if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err != nil {
-		return xerrors.Errorf("failed to walk the chain when checkpointing: %w", err)
-	} else if anc {
-		// New checkpoint is on the current chain, we definitely have the tipsets.
-	} else if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
-		return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
-	}
+	if !hts.Equals(ts) {
+		if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err != nil {
+			return xerrors.Errorf("failed to walk the chain when checkpointing: %w", err)
+		} else if !anc {
+			if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
+				return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
+			}
+		} // else new checkpoint is on the current chain, we definitely have the tipsets.
+	} // else current head, no need to switch.
 
 	if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil {
 		return xerrors.Errorf("failed to set the chain checkpoint: %w", err)
@@ -778,9 +778,7 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
 	_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
 
 	mp.curTsLk.Lock()
-	if tmpCurTs == mp.curTs {
-		//with the lock enabled, mp.curTs is the same Ts as we just had, so we know that our computations are cached
-	} else {
+	if tmpCurTs != mp.curTs {
 		//curTs has been updated so we want to cache the new one:
 		tmpCurTs = mp.curTs
 		//we want to release the lock, cache the computations then grab it again

@@ -789,7 +787,7 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
 		_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
 		mp.curTsLk.Lock()
 		//now that we have the lock, we continue, we could do this as a loop forever, but that's bad to loop forever, and this was added as an optimization and it seems once is enough because the computation < block time
-	}
+	} // else with the lock enabled, mp.curTs is the same Ts as we just had, so we know that our computations are cached
 
 	defer mp.curTsLk.Unlock()
@@ -1321,7 +1321,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
 		mustAdd(t, mp, m)
 	}
 
-	logging.SetLogLevel("messagepool", "error")
+	_ = logging.SetLogLevel("messagepool", "error")
 
 	// 1. greedy selection
 	gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts)

@@ -1414,7 +1414,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
 	t.Logf("Average reward boost: %f", rewardBoost)
 	t.Logf("Average best tq reward: %f", totalBestTQReward/runs/1e12)
 
-	logging.SetLogLevel("messagepool", "info")
+	_ = logging.SetLogLevel("messagepool", "info")
 
 	return capacityBoost, rewardBoost, totalBestTQReward / runs / 1e12
 }
@@ -184,9 +184,8 @@ func (sr *stateRand) GetBeaconRandomness(ctx context.Context, filecoinEpoch abi.
 		return sr.getBeaconRandomnessV3(ctx, filecoinEpoch)
 	} else if nv == network.Version13 {
 		return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
-	} else {
-		return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
 	}
+	return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
 }
 
 func (sr *stateRand) DrawChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
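Note: several hunks like this one drop a terminal `else` whose body is only a return, hoisting the return to the end of the function (revive's indent-error-flow, i.e. Go's early-return convention). A minimal sketch:

```go
package main

import "fmt"

// pick shows the rewrite: the former `else { return "v1" }` becomes an
// unconditional tail return after the if/else-if chain.
func pick(nv int) string {
	if nv >= 14 {
		return "v3"
	} else if nv == 13 {
		return "v2"
	}
	return "v1"
}

func main() {
	fmt.Println(pick(13)) // v2
}
```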
@@ -392,7 +392,7 @@ func (s *walkScheduler) Wait() error {
 		log.Errorw("error writing to CAR file", "error", err)
 		return errWrite
 	}
-	s.workerTasks.Close() //nolint:errcheck
+	_ = s.workerTasks.Close()
 	return err
 }
@@ -305,6 +305,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
 			// Unsubscribe.
 			cs.bestTips.Unsub(subch)
 
+			// revive:disable-next-line:empty-block
 			// Drain the channel.
 			for range subch {
 			}

@@ -752,7 +753,7 @@ func FlushValidationCache(ctx context.Context, ds dstore.Batching) error {
 	for _, k := range allKeys {
 		if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
 			delCnt++
-			batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck
+			_ = batch.Delete(ctx, dstore.RawKey(k.Key))
 		}
 	}
@@ -85,7 +85,7 @@ type syncTestUtil struct {
 }
 
 func prepSyncTest(t testing.TB, h int) *syncTestUtil {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	g, err := gen.NewGenerator()
 	if err != nil {

@@ -115,7 +115,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
 }
 
 func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	sched := stmgr.UpgradeSchedule{{
 		// prepare for upgrade.
@@ -927,7 +927,7 @@ func NewEthBlockNumberOrHashFromNumber(number EthUint64) EthBlockNumberOrHash {
 
 func NewEthBlockNumberOrHashFromHexString(str string) (EthBlockNumberOrHash, error) {
 	// check if block param is a number (decimal or hex)
-	var num EthUint64 = 0
+	var num EthUint64
 	err := num.UnmarshalJSON([]byte(str))
 	if err != nil {
 		return NewEthBlockNumberOrHashFromNumber(0), err
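Note: the companion declaration pattern removes explicit zero initializers (`= 0`, `= nil`, `= false`), since a plain `var` declaration already yields the zero value; it recurs below for `statusCode`, `totalLatency`, `ramReserved`, `vmemReserved`, `retErr`, and others. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Before: var num uint64 = 0 / var retErr error = nil
	// After: the zero value is implicit in the declaration.
	var num uint64
	var retErr error
	fmt.Println(num, retErr) // 0 <nil>
}
```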
@@ -336,9 +336,7 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
 				return nil, aerrors.Wrapf(err, "could not create account")
 			}
 			toActor = a
-			if vm.networkVersion <= network.Version3 {
-				// Leave the rt.Message as is
-			} else {
+			if vm.networkVersion > network.Version3 {
 				nmsg := Message{
 					msg: types.Message{
 						To:    aid,

@@ -346,9 +344,8 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
 						Value: rt.Message.ValueReceived(),
 					},
 				}
-
 				rt.Message = &nmsg
-			}
+			} // else leave the rt.Message as is
 		} else {
 			return nil, aerrors.Escalate(err, "getting actor")
 		}
@@ -24,7 +24,7 @@ type BackupApiFn func(ctx *cli.Context) (BackupAPI, jsonrpc.ClientCloser, error)
 
 func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Command {
 	var offlineBackup = func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+		_ = logging.SetLogLevel("badger", "ERROR")
 
 		repoPath := cctx.String(repoFlag)
 		r, err := repo.NewFS(repoPath)
@@ -574,7 +574,7 @@ func interactiveDeal(cctx *cli.Context) error {
 	cs := readline.NewCancelableStdin(afmt.Stdin)
 	go func() {
 		<-ctx.Done()
-		cs.Close() // nolint:errcheck
+		_ = cs.Close()
 	}()
 
 	rl := bufio.NewReader(cs)
@@ -2327,7 +2327,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
 	for _, channel := range sendingChannels {
 		w.Write(toChannelOutput("Sending To", channel, verbose))
 	}
-	w.Flush(out) //nolint:errcheck
+	_ = w.Flush(out)
 
 	fmt.Fprintf(out, "\nReceiving Channels\n\n")
 	w = tablewriter.New(tablewriter.Col("ID"),

@@ -2341,7 +2341,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
 	for _, channel := range receivingChannels {
 		w.Write(toChannelOutput("Receiving From", channel, verbose))
 	}
-	w.Flush(out) //nolint:errcheck
+	_ = w.Flush(out)
 }
 
 func channelStatusString(status datatransfer.Status) string {
@@ -131,9 +131,8 @@ func infoCmdAct(cctx *cli.Context) error {
 		if err != nil {
 			if strings.Contains(err.Error(), "actor not found") {
 				continue
-			} else {
-				return err
 			}
+			return err
 		}
 		mbLockedSum = big.Add(mbLockedSum, mbal.Locked)
 		mbAvailableSum = big.Add(mbAvailableSum, mbal.Escrow)
@@ -5,5 +5,5 @@ import (
 )
 
 func init() {
-	logging.SetLogLevel("watchdog", "ERROR")
+	_ = logging.SetLogLevel("watchdog", "ERROR")
 }
@@ -122,7 +122,7 @@ func printChecks(printer io.Writer, checkGroups [][]api.MessageCheckStatus, prot
 func askUser(printer io.Writer, q string, def bool) bool {
 	var resp string
 	fmt.Fprint(printer, q)
-	fmt.Scanln(&resp)
+	_, _ = fmt.Scanln(&resp)
 	resp = strings.ToLower(resp)
 	if len(resp) == 0 {
 		return def
@@ -13,7 +13,7 @@ import (
 	"github.com/filecoin-project/lotus/node/config"
 )
 
-var baseText string = `
+var baseText = `
 [Subsystems]
 # EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster
 # with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
@@ -270,7 +270,7 @@ func (c *CMD) startWorker(qpsTicker *time.Ticker) {
 
 		start := time.Now()
 
-		var statusCode int = 0
+		var statusCode int
 
 		arr := strings.Fields(c.cmd)
@@ -93,7 +93,7 @@ type Commit2In struct {
 }
 
 func main() {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	log.Info("Starting lotus-bench")
@@ -88,7 +88,7 @@ func (r *Reporter) Print(elapsed time.Duration, w io.Writer) {
 		return r.latencies[i] < r.latencies[j]
 	})
 
-	var totalLatency int64 = 0
+	var totalLatency int64
 	for _, latency := range r.latencies {
 		totalLatency += latency
 	}
@@ -30,7 +30,7 @@ import (
 var log = logging.Logger("main")
 
 func main() {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	log.Info("Starting fountain")
@@ -25,7 +25,7 @@ type CidWindow [][]cid.Cid
 var log = logging.Logger("lotus-health")
 
 func main() {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	log.Info("Starting health agent")
@@ -151,7 +151,7 @@ func workersCmd(sealing bool) *cli.Command {
 				ramTotal := stat.Info.Resources.MemPhysical
 				ramTasks := stat.MemUsedMin
 				ramUsed := stat.Info.Resources.MemUsed
-				var ramReserved uint64 = 0
+				var ramReserved uint64
 				if ramUsed > ramTasks {
 					ramReserved = ramUsed - ramTasks
 				}

@@ -167,7 +167,7 @@ func workersCmd(sealing bool) *cli.Command {
 				vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
 				vmemTasks := stat.MemUsedMax
 				vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed
-				var vmemReserved uint64 = 0
+				var vmemReserved uint64
 				if vmemUsed > vmemTasks {
 					vmemReserved = vmemUsed - vmemTasks
 				}
@@ -1305,14 +1305,11 @@ var sectorsBatchingPendingCommit = &cli.Command{
 				return cctx.Command.Action(cctx)
 			} else if userInput == "no" {
 				return nil
-			} else {
-				fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
-				return nil
 			}
-
-		} else {
-			fmt.Println("No sectors queued to be committed")
+			fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
+			return nil
 		}
+		fmt.Println("No sectors queued to be committed")
 		return nil
 	},
 }
@@ -1384,14 +1381,11 @@ var sectorsBatchingPendingPreCommit = &cli.Command{
 				return cctx.Command.Action(cctx)
 			} else if userInput == "no" {
 				return nil
-			} else {
-				fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
-				return nil
 			}
-
-		} else {
-			fmt.Println("No sectors queued to be committed")
+			fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
+			return nil
 		}
+		fmt.Println("No sectors queued to be committed")
 		return nil
 	},
 }
@@ -26,7 +26,7 @@ import (
 var log = logging.Logger("lotus-seed")
 
 func main() {
-	logging.SetLogLevel("*", "INFO")
+	_ = logging.SetLogLevel("*", "INFO")
 
 	local := []*cli.Command{
 		genesisCmd,
@@ -685,7 +685,7 @@ var chainPledgeCmd = &cli.Command{
 	},
 	ArgsUsage: "[stateroot epoch]",
 	Action: func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR")
+		_ = logging.SetLogLevel("badger", "ERROR")
 		ctx := context.TODO()
 
 		if !cctx.Args().Present() {

@@ -916,13 +916,13 @@ var fillBalancesCmd = &cli.Command{
 		}
 
 		w := csv.NewWriter(os.Stdout)
-		w.Write(append([]string{"Wallet Address"}, datestrs...)) // nolint:errcheck
+		_ = w.Write(append([]string{"Wallet Address"}, datestrs...))
 		for i := 0; i < len(addrs); i++ {
 			row := []string{addrs[i].String()}
 			for _, b := range balances[i] {
 				row = append(row, types.FIL(b).String())
 			}
-			w.Write(row) // nolint:errcheck
+			_ = w.Write(row)
 		}
 		w.Flush()
 		return nil
@@ -57,7 +57,7 @@ var datastoreListCmd = &cli.Command{
 	},
 	ArgsUsage: "[namespace prefix]",
 	Action: func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+		_ = logging.SetLogLevel("badger", "ERROR")
 
 		r, err := repo.NewFS(cctx.String("repo"))
 		if err != nil {

@@ -123,7 +123,7 @@ var datastoreGetCmd = &cli.Command{
 	},
 	ArgsUsage: "[namespace key]",
 	Action: func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+		_ = logging.SetLogLevel("badger", "ERROR")
 
 		r, err := repo.NewFS(cctx.String("repo"))
 		if err != nil {
@@ -253,9 +253,8 @@ var diffStateTrees = &cli.Command{
 			if ok {
 				diff(stateA, stateB)
 				continue
-			} else {
-				fmt.Printf("  actor does not exist in second state-tree (%s)\n", rootB)
 			}
+			fmt.Printf("  actor does not exist in second state-tree (%s)\n", rootB)
 			fmt.Println()
 			delete(changedB, addr)
 		}

@@ -265,9 +264,8 @@ var diffStateTrees = &cli.Command{
 			if ok {
 				diff(stateA, stateB)
 				continue
-			} else {
-				fmt.Printf("  actor does not exist in first state-tree (%s)\n", rootA)
 			}
+			fmt.Printf("  actor does not exist in first state-tree (%s)\n", rootA)
 			fmt.Println()
 		}
 		return nil
@@ -64,7 +64,7 @@ var itestdCmd = &cli.Command{
 		cs := readline.NewCancelableStdin(os.Stdin)
 		go func() {
 			<-cctx.Done()
-			cs.Close() // nolint:errcheck
+			_ = cs.Close()
 		}()
 
 		rl := bufio.NewReader(cs)
@@ -214,7 +214,7 @@ var marketExportDatastoreCmd = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+		_ = logging.SetLogLevel("badger", "ERROR")
 
 		// If the backup dir is not specified, just use the OS temp dir
 		backupDir := cctx.String("backup-dir")

@@ -332,7 +332,7 @@ var marketImportDatastoreCmd = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
-		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+		_ = logging.SetLogLevel("badger", "ERROR")
 
 		backupPath := cctx.String("backup-path")
@@ -67,7 +67,7 @@ var rpcCmd = &cli.Command{
 		cs := readline.NewCancelableStdin(afmt.Stdin)
 		go func() {
 			<-ctx.Done()
-			cs.Close() // nolint:errcheck
+			_ = cs.Close()
 		}()
 
 		send := func(method, params string) error {

@@ -148,9 +148,8 @@ var rpcCmd = &cli.Command{
 			if err == readline.ErrInterrupt {
 				if len(line) == 0 {
 					break
-				} else {
-					continue
 				}
+				continue
 			} else if err == io.EOF {
 				break
 			}
@@ -723,7 +723,7 @@ to reduce the number of decode operations performed by caching the decoded objec
 
 	go func() {
 		// error is check later
-		eg.Wait() //nolint:errcheck
+		_ = eg.Wait()
 		close(results)
 	}()
@@ -274,9 +274,8 @@ type AbortWithArgs struct {
 func (a Actor) AbortWith(rt runtime2.Runtime, args *AbortWithArgs) *abi.EmptyValue {
 	if args.Uncontrolled { // uncontrolled abort: directly panic
 		panic(args.Message)
-	} else {
-		rt.Abortf(args.Code, args.Message)
 	}
+	rt.Abortf(args.Code, args.Message)
 	return nil
 }
@@ -390,9 +390,8 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
 			}
 			if !taskResult {
 				return api.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError)
-			} else {
-				return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete")
 			}
+			return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete")
 		}
 	}
@@ -202,7 +202,7 @@ func (t *WdPostTask) DoPartition(ctx context.Context, ts *types.TipSet, maddr ad
 		Proofs:            postOut,
 		ChallengedSectors: sinfos,
 		Prover:            abi.ActorID(mid),
-	}); err != nil {
+	}); err != nil { // revive:disable-line:empty-block
 		/*log.Errorw("window post verification failed", "post", postOut, "error", err)
 		time.Sleep(5 * time.Second)
 		continue todo retry loop */

@@ -337,7 +337,7 @@ func (t *WdPostTask) sectorsForProof(ctx context.Context, maddr address.Address,
 }
 
 func (t *WdPostTask) generateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
-	var retErr error = nil
+	var retErr error
 	randomness[31] &= 0x3f
 
 	out := make([]proof.PoStProof, 0)
@@ -667,7 +667,7 @@ func (t *WinPostTask) computeTicket(ctx context.Context, maddr address.Address,
 
 func randTimeOffset(width time.Duration) time.Duration {
 	buf := make([]byte, 8)
-	rand.Reader.Read(buf) //nolint:errcheck
+	_, _ = rand.Reader.Read(buf)
 	val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width))
 
 	return val - (width / 2)
@@ -90,7 +90,7 @@ func (gw *Node) checkEthBlockParam(ctx context.Context, blkParam ethtypes.EthBlo
 		return err
 	}
 
-	var num ethtypes.EthUint64 = 0
+	var num ethtypes.EthUint64
 	if blkParam.PredefinedBlock != nil {
 		if *blkParam.PredefinedBlock == "earliest" {
 			return fmt.Errorf("block param \"earliest\" is not supported")
@@ -9,7 +9,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 )
 
-var tmpl *template.Template = template.Must(template.New("actor-metadata").Parse(`
+var tmpl = template.Must(template.New("actor-metadata").Parse(`
 // WARNING: This file has automatically been generated
 
 package build
@@ -579,7 +579,7 @@ func TestTxReceiptBloom(t *testing.T) {
 		kit.MockProofs(),
 		kit.ThroughRPC())
 	ens.InterconnectAll().BeginMining(blockTime)
-	logging.SetLogLevel("fullnode", "DEBUG")
+	_ = logging.SetLogLevel("fullnode", "DEBUG")
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 	defer cancel()
@@ -125,7 +125,7 @@ func TestEstimateInclusion(t *testing.T) {
 
 	// Mutate the last byte to get a new address of the same length.
 	toBytes := msg.To.Bytes()
-	toBytes[len(toBytes)-1] += 1 //nolint:golint
+	toBytes[len(toBytes)-1] += 1 // revive:disable-line:increment-decrement
 	newAddr, err := address.NewFromBytes(toBytes)
 	require.NoError(t, err)

@@ -158,7 +158,7 @@ func TestEstimateInclusion(t *testing.T) {
 
 	msg.Nonce = 2
 	msg.To = msg.From
-	msg.GasLimit -= 1 //nolint:golint
+	msg.GasLimit -= 1 // revive:disable-line:increment-decrement
 
 	smsg, err = client.WalletSignMessage(ctx, client.DefaultKey.Address, msg)
 	require.NoError(t, err)
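Note: revive's increment-decrement rule prefers `x++` over `x += 1`. In the two test hunks above the `+= 1` spelling is kept deliberately (the point is a mutation, not a count), so the finding is silenced with a line directive rather than rewritten. A hypothetical sketch:

```go
package main

import "fmt"

func main() {
	b := []byte{0x00, 0x7f}
	// Deliberate byte mutation; += 1 reads better than ++ here.
	b[len(b)-1] += 1 // revive:disable-line:increment-decrement
	fmt.Println(b)   // [0 128]
}
```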
@@ -32,7 +32,7 @@ func withDbSetup(t *testing.T, f func(*kit.TestMiner)) {
 		kit.MockProofs(),
 		kit.WithSectorIndexDB(),
 	)
-	logging.SetLogLevel("harmonytask", "debug")
+	_ = logging.SetLogLevel("harmonytask", "debug")
 
 	f(miner)
 }
|
@ -141,7 +141,7 @@ func createRandomFile(rseed, size int) ([]byte, string, error) {
|
||||
size = 1600
|
||||
}
|
||||
data := make([]byte, size)
|
||||
rand.New(rand.NewSource(int64(rseed))).Read(data)
|
||||
_, _ = rand.New(rand.NewSource(int64(rseed))).Read(data)
|
||||
|
||||
dir, err := os.MkdirTemp(os.TempDir(), "test-make-deal-")
|
||||
if err != nil {
|
||||
|
@@ -709,9 +709,9 @@ func (n *Ensemble) Start() *Ensemble {
 
 		var mineBlock = make(chan lotusminer.MineReq)
 
-		copy := *m.FullNode
-		copy.FullNode = modules.MakeUuidWrapper(copy.FullNode)
-		m.FullNode = &copy
+		minerCopy := *m.FullNode
+		minerCopy.FullNode = modules.MakeUuidWrapper(minerCopy.FullNode)
+		m.FullNode = &minerCopy
 
 		opts := []node.Option{
 			node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
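Note: a further family of fixes renames variables that shadow Go's predeclared identifiers: `copy` becomes `minerCopy` here, and later hunks rename `new` to `isNew`, `len` to `l`, and `close` to `closeEpoch`. Shadowing is legal but makes the builtin unusable for the rest of the scope and trips the linter. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Before: `copy := value` would shadow the builtin copy().
	// Renaming keeps the builtin available and avoids confusion.
	src := []int{1, 2, 3}
	srcCopy := make([]int, len(src))
	copy(srcCopy, src) // builtin still usable
	fmt.Println(srcCopy)
}
```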
@@ -413,9 +413,8 @@ func (g *Garbager) Exists(ctx context.Context, c cid.Cid) bool {
 	} else if err != nil {
 		g.t.Fatalf("ChainReadObj failure on existence check: %s", err)
 		return false // unreachable
-	} else {
-		return true
 	}
+	return true
 }
 
 func (g *Garbager) newPeerID(ctx context.Context) abi.ChainEpoch {
@@ -46,8 +46,8 @@ func TestManageInvalidNFds(t *testing.T) {
 
 	t.Logf("setting ulimit to %d, max %d, cur %d", value, rlimit.Max, rlimit.Cur)
 
-	if changed, new, err := ManageFdLimit(); err == nil {
-		t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, new)
+	if changed, isNew, err := ManageFdLimit(); err == nil {
+		t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, isNew)
 	} else if err != nil {
 		flag := strings.Contains(err.Error(),
 			"failed to raise ulimit to LOTUS_FD_MAX")
@@ -201,7 +201,7 @@ func (m *minerAPI) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (ui
 		return 0, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
 	}
 
-	len := pieceInfo.Deals[0].Length
+	l := pieceInfo.Deals[0].Length
 
-	return uint64(len), nil
+	return uint64(l), nil
 }
@@ -129,9 +129,9 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) {
 
 	// Check that the data length is correct
 	//stm: @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001
-	len, err := api.GetUnpaddedCARSize(ctx, cid1)
+	l, err := api.GetUnpaddedCARSize(ctx, cid1)
 	require.NoError(t, err)
-	require.EqualValues(t, 10, len)
+	require.EqualValues(t, 10, l)
 }
 
 func TestThrottle(t *testing.T) {
@@ -53,7 +53,7 @@ type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEp
 
 func randTimeOffset(width time.Duration) time.Duration {
 	buf := make([]byte, 8)
-	rand.Reader.Read(buf) //nolint:errcheck
+	_, _ = rand.Reader.Read(buf)
 	val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width))
 
 	return val - (width / 2)
@@ -48,7 +48,7 @@ func NewLiveHandler(api lapi.FullNode) *HealthHandler {
 		var (
 			countdown int32
 			headCh    <-chan []*lapi.HeadChange
-			backoff   time.Duration = minbackoff
+			backoff   = minbackoff
 			err       error
 		)
 		minutely := time.NewTicker(time.Minute)

@@ -66,10 +66,9 @@ func NewLiveHandler(api lapi.FullNode) *HealthHandler {
 					}
 					backoff = nextbackoff
 					continue
-				} else {
-					healthlog.Infof("started ChainNotify channel")
-					backoff = minbackoff
 				}
+				healthlog.Infof("started ChainNotify channel")
+				backoff = minbackoff
 			}
 			select {
 			case <-minutely.C:
@@ -644,8 +644,8 @@ func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipo
 		bw := bufio.NewWriterSize(w, 1<<20)
 
 		err := a.Chain.Export(ctx, ts, nroots, skipoldmsgs, bw)
-		bw.Flush()            //nolint:errcheck // it is a write to a pipe
-		w.CloseWithError(err) //nolint:errcheck // it is a pipe
+		_ = bw.Flush()            // it is a write to a pipe
+		_ = w.CloseWithError(err) // it is a pipe
 	}()
 
 	go func() {
@@ -92,9 +92,8 @@ func getTipsetByEthBlockNumberOrHash(ctx context.Context, chain *store.ChainStor
 			return nil, fmt.Errorf("cannot get parent tipset")
 		}
 		return parent, nil
-	} else {
-		return nil, fmt.Errorf("unknown predefined block %s", *predefined)
 	}
+	return nil, fmt.Errorf("unknown predefined block %s", *predefined)
 }
 
 if blkParam.BlockNumber != nil {

@@ -298,7 +297,7 @@ func executeTipset(ctx context.Context, ts *types.TipSet, cs *store.ChainStore,
 const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
 const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
 // Eth ABI (solidity) panic codes.
-var panicErrorCodes map[uint64]string = map[uint64]string{
+var panicErrorCodes = map[uint64]string{
 	0x00: "Panic()",
 	0x01: "Assert()",
 	0x11: "ArithmeticOverflow()",
@@ -398,19 +397,19 @@ func lookupEthAddress(addr address.Address, st *state.StateTree) (ethtypes.EthAd
 	}
 
 	// Lookup on the target actor and try to get an f410 address.
-	if actor, err := st.GetActor(idAddr); errors.Is(err, types.ErrActorNotFound) {
-		// Not found -> use a masked ID address
-	} else if err != nil {
-		// Any other error -> fail.
-		return ethtypes.EthAddress{}, err
-	} else if actor.Address == nil {
-		// No delegated address -> use masked ID address.
-	} else if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
-		// Conversable into an eth address, use it.
-		return ethAddr, nil
-	}
+	if actor, err := st.GetActor(idAddr); !errors.Is(err, types.ErrActorNotFound) {
+		if err != nil {
+			// Any other error -> fail.
+			return ethtypes.EthAddress{}, err
+		}
+		if actor.Address != nil {
+			if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
+				// Conversable into an eth address, use it.
+				return ethAddr, nil
+			}
+		} // else no delegated address -> use masked ID address.
+	} // else not found -> use a masked ID address
 
 	// Otherwise, use the masked address.
 	return ethtypes.EthAddressFromFilecoinAddress(idAddr)
 }
@@ -456,9 +455,9 @@ func ethTxHashFromSignedMessage(smsg *types.SignedMessage) (ethtypes.EthHash, er
 		return tx.TxHash()
 	} else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
 		return ethtypes.EthHashFromCid(smsg.Cid())
-	} else { // BLS message
-		return ethtypes.EthHashFromCid(smsg.Message.Cid())
 	}
+	// else BLS message
+	return ethtypes.EthHashFromCid(smsg.Message.Cid())
 }
 
 func newEthTxFromSignedMessage(smsg *types.SignedMessage, st *state.StateTree) (ethtypes.EthTx, error) {
@@ -817,8 +816,8 @@ func encodeAsABIHelper(param1 uint64, param2 uint64, data []byte) []byte {
 	if len(data)%EVM_WORD_SIZE != 0 {
 		totalWords++
 	}
-	len := totalWords * EVM_WORD_SIZE
-	buf := make([]byte, len)
+	l := totalWords * EVM_WORD_SIZE
+	buf := make([]byte, l)
 	offset := 0
 	// Below, we use copy instead of "appending" to preserve all the zero padding.
 	for _, arg := range staticArgs {
@@ -965,9 +965,8 @@ func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Addres
 		return a.stateComputeDataCIDv1(ctx, maddr, sectorType, deals, tsk)
 	} else if nv < network.Version21 {
 		return a.stateComputeDataCIDv2(ctx, maddr, sectorType, deals, tsk)
-	} else {
-		return a.stateComputeDataCIDv3(ctx, maddr, sectorType, deals, tsk)
 	}
+	return a.stateComputeDataCIDv3(ctx, maddr, sectorType, deals, tsk)
 }
 
 func (a *StateAPI) stateComputeDataCIDv1(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) {
@@ -594,7 +594,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
 		msgsRPC := evt.GetRecvRPC().GetMeta().GetMessages()
 
 		// check if any of the messages we are sending belong to a trackable topic
-		var validTopic bool = false
+		var validTopic = false
 		for _, topic := range msgsRPC {
 			if trw.traceMessage(topic.GetTopic()) {
 				validTopic = true

@@ -602,7 +602,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
 			}
 		}
 		// track if the Iwant / Ihave messages are from a valid Topic
-		var validIhave bool = false
+		var validIhave = false
 		for _, msgs := range ihave {
 			if trw.traceMessage(msgs.GetTopic()) {
 				validIhave = true

@@ -630,7 +630,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
 		msgsRPC := evt.GetSendRPC().GetMeta().GetMessages()
 
 		// check if any of the messages we are sending belong to a trackable topic
-		var validTopic bool = false
+		var validTopic = false
 		for _, topic := range msgsRPC {
 			if trw.traceMessage(topic.GetTopic()) {
 				validTopic = true

@@ -638,7 +638,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
 			}
 		}
 		// track if the Iwant / Ihave messages are from a valid Topic
-		var validIhave bool = false
+		var validIhave = false
 		for _, msgs := range ihave {
 			if trw.traceMessage(msgs.GetTopic()) {
 				validIhave = true
@@ -38,7 +38,7 @@ func ResourceManager(connMgrHi uint) func(lc fx.Lifecycle, repo repo.LockedRepo)
 
 			log.Info("libp2p resource manager is enabled")
 			// enable debug logs for rcmgr
-			logging.SetLogLevel("rcmgr", "debug")
+			_ = logging.SetLogLevel("rcmgr", "debug")
 
 			// Adjust default defaultLimits
 			// - give it more memory, up to 4G, min of 1G
@@ -288,7 +288,7 @@ func (fsr *FsRepo) Init(t RepoType) error {
 	}
 
 	log.Infof("Initializing repo at '%s'", fsr.path)
-	err = os.MkdirAll(fsr.path, 0755) //nolint: gosec
+	err = os.MkdirAll(fsr.path, 0755)
 	if err != nil && !os.IsExist(err) {
 		return err
 	}
@@ -15,7 +15,7 @@ import (
 )
 
 func init() {
-	logging.SetLogLevel("stores", "DEBUG")
+	_ = logging.SetLogLevel("stores", "DEBUG")
 }
 
 func newTestStorage() storiface.StorageInfo {
@@ -37,7 +37,7 @@ import (
 )
 
 func init() {
-	logging.SetLogLevel("*", "DEBUG") //nolint: errcheck
+	_ = logging.SetLogLevel("*", "DEBUG")
 }
 
 var sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1
@@ -108,7 +108,7 @@ func dedupeSectorInfo(sectorInfo []proof.ExtendedSectorInfo) []proof.ExtendedSec
 }
 
 func (m *Manager) generateWindowPoSt(ctx context.Context, minerID abi.ActorID, ppt abi.RegisteredPoStProof, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
-	var retErr error = nil
+	var retErr error
 	randomness[31] &= 0x3f
 
 	out := make([]proof.PoStProof, 0)
@@ -56,7 +56,7 @@ func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealta
 func (ps *poStScheduler) delWorker(wid storiface.WorkerID) *WorkerHandle {
 	ps.lk.Lock()
 	defer ps.lk.Unlock()
-	var w *WorkerHandle = nil
+	var w *WorkerHandle
 	if wh, ok := ps.workers[wid]; ok {
 		w = wh
 		delete(ps.workers, wid)
@@ -84,10 +84,10 @@ func (m *mockAPI) setDeadline(di *dline.Info) {
 }
 
 func (m *mockAPI) getDeadline(currentEpoch abi.ChainEpoch) *dline.Info {
-	close := minertypes.WPoStChallengeWindow - 1
+	closeEpoch := minertypes.WPoStChallengeWindow - 1
 	dlIdx := uint64(0)
-	for close < currentEpoch {
-		close += minertypes.WPoStChallengeWindow
+	for closeEpoch < currentEpoch {
+		closeEpoch += minertypes.WPoStChallengeWindow
 		dlIdx++
 	}
 	return NewDeadlineInfo(0, dlIdx, currentEpoch)
@@ -23,7 +23,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )
 
-var RecoveringSectorLimit uint64 = 0
+var RecoveringSectorLimit uint64
 
 func init() {
 	if rcl := os.Getenv("LOTUS_RECOVERING_SECTOR_LIMIT"); rcl != "" {