lints and non parallel
parent 7ec9eb0a70
commit 4b8aa53806
@@ -847,7 +847,8 @@ var NetStatCmd = &cli.Command{
  })

  for _, stat := range stats {
- printScope(&stat.stat, name+stat.name)
+ tmp := stat.stat
+ printScope(&tmp, name+stat.name)
  }

  }
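This hunk (and the provingCheckProvableCmd and terminationsCmd hunks below) applies the usual fix for linter warnings about implicit memory aliasing of a range variable (gosec G601 / exportloopref style): taking the address of a field of the loop variable aliases storage that the `range` statement reuses on every iteration, so the values are copied into a fresh local before `&` is taken. A minimal standalone sketch of the hazard and the fix, with made-up names rather than code from this repo:

```go
package main

import "fmt"

type stat struct{ n int }

func main() {
	stats := []stat{{1}, {2}, {3}}

	// Risky: &s points at the single loop variable, which is reused each
	// iteration on toolchains before Go 1.22, so every stored pointer is equal.
	var bad []*stat
	for _, s := range stats {
		bad = append(bad, &s)
	}
	fmt.Println(*bad[0], *bad[1], *bad[2]) // {3} {3} {3} on pre-1.22 toolchains

	// Lint-friendly: copy to a fresh local, then take its address.
	var good []*stat
	for _, s := range stats {
		tmp := s
		good = append(good, &tmp)
	}
	fmt.Println(*good[0], *good[1], *good[2]) // {1} {2} {3}
}
```

In the commit itself the callees do not appear to retain the pointer, so the copy mainly quiets the linter without changing behaviour.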
@@ -559,7 +559,8 @@ var provingCheckProvableCmd = &cli.Command{
  for parIdx, par := range partitions {
  sectors := make(map[abi.SectorNumber]struct{})

- sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK)
+ tmp := par.LiveSectors
+ sectorInfos, err := api.StateMinerSectors(ctx, addr, &tmp, types.EmptyTSK)
  if err != nil {
  return err
  }
@@ -65,7 +65,7 @@ It will not send any messages to the chain. Since it can compute any deadline, o
  Action: func(cctx *cli.Context) error {

  ctx := context.Background()
- deps, err := getDeps(cctx, ctx)
+ deps, err := getDeps(ctx, cctx)
  if err != nil {
  return err
  }
@@ -134,7 +134,7 @@ var runCmd = &cli.Command{
  }

  shutdownChan := make(chan struct{})
- deps, err := getDeps(cctx, ctx)
+ deps, err := getDeps(ctx, cctx)

  if err != nil {
  return err
@@ -267,7 +267,7 @@ type Deps struct {
  listenAddr string
  }

- func getDeps(cctx *cli.Context, ctx context.Context) (*Deps, error) {
+ func getDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) {
  // Open repo

  repoPath := cctx.String(FlagRepoPath)
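The getDeps hunks only reorder the parameters so that context.Context comes first, matching the standard Go convention that style linters enforce; the call sites are updated in the same commit. A small sketch of the convention, using a hypothetical stand-in rather than the real getDeps:

```go
package main

import (
	"context"
	"fmt"
)

type deps struct{ repoPath string } // stand-in for the real Deps struct

// Go convention: context.Context is always the first parameter.
// getDepsExample is a hypothetical stand-in for getDeps in this commit.
func getDepsExample(ctx context.Context, repoPath string) (*deps, error) {
	if err := ctx.Err(); err != nil { // respect cancellation before doing work
		return nil, err
	}
	return &deps{repoPath: repoPath}, nil
}

func main() {
	d, err := getDepsExample(context.Background(), "~/.lotusprovider")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.repoPath)
}
```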
@@ -319,7 +319,7 @@ func getDeps(cctx *cli.Context, ctx context.Context) (*Deps, error) {
  if err != nil {
  return nil, err
  }
- defer j.Close()
+ defer func() { _ = j.Close() }()

  full, fullCloser, err := cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfg.Apis.ChainApiInfo)
  if err != nil {
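`defer j.Close()` silently discards the Close error, which errcheck flags. Wrapping the call in a closure and assigning the error to the blank identifier makes "this error is deliberately ignored" explicit. A small sketch of the two spellings, using a throwaway io.Closer (the names here are illustrative, not the repo's journal type):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func use(rc io.ReadCloser) {
	// errcheck-unfriendly: the Close error is dropped without comment.
	// defer rc.Close()

	// errcheck-friendly: explicitly discard the error with the blank identifier.
	defer func() { _ = rc.Close() }()

	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
}

func main() {
	use(io.NopCloser(strings.NewReader("hello")))
}
```

When the Close error actually matters (writable files, network resources), the same closure is the natural place to log or propagate it instead of discarding it.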
@@ -157,7 +157,8 @@ var terminationsCmd = &cli.Command{
  }

  for _, t := range termParams.Terminations {
- sectors, err := minerSt.LoadSectors(&t.Sectors)
+ tmp := t.Sectors
+ sectors, err := minerSt.LoadSectors(&tmp)
  if err != nil {
  return err
  }
@@ -78,7 +78,7 @@ func init() {
  }

  func TestHarmonyTasks(t *testing.T) {
- t.Parallel()
+ //t.Parallel()
  withDbSetup(t, func(m *kit.TestMiner) {
  cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
  t1 := &task1{
@@ -160,7 +160,7 @@ func fooLetterSaver(t *testing.T, cdb *harmonydb.DB, dest *[]string) *passthru {
  }

  func TestHarmonyTasksWith2PartiesPolling(t *testing.T) {
- t.Parallel()
+ //t.Parallel()
  withDbSetup(t, func(m *kit.TestMiner) {
  cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
  senderParty := fooLetterAdder(t, cdb)
@@ -180,7 +180,7 @@ func TestHarmonyTasksWith2PartiesPolling(t *testing.T) {
  }

  func TestWorkStealing(t *testing.T) {
- t.Parallel()
+ //t.Parallel()
  withDbSetup(t, func(m *kit.TestMiner) {
  cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
  ctx := context.Background()
@@ -209,7 +209,7 @@ func TestWorkStealing(t *testing.T) {
  }

  func TestTaskRetry(t *testing.T) {
- t.Parallel()
+ //t.Parallel()
  withDbSetup(t, func(m *kit.TestMiner) {
  cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
  senderParty := fooLetterAdder(t, cdb)
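The "non parallel" half of the commit comments out t.Parallel() in the four HarmonyDB task tests above. t.Parallel() lets a test run concurrently with the other parallel tests in its package; these tests all go through withDbSetup against shared database state, so running them serially plausibly avoids them stepping on each other (that motivation is inferred from the change, not stated in the commit). A minimal illustration of what the call does:

```go
package example

import (
	"testing"
	"time"
)

// Runs concurrently with every other test in the package that also calls t.Parallel().
func TestIndependent(t *testing.T) {
	t.Parallel()
	time.Sleep(10 * time.Millisecond)
}

// Left serial on purpose: it touches shared state (a database, a global,
// a fixture directory) that another parallel test could race on.
func TestSharedState(t *testing.T) {
	//t.Parallel()
	time.Sleep(10 * time.Millisecond)
}
```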
@@ -168,7 +168,7 @@ func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageS
  return cid.Undef, xerrors.Errorf("updating db record: %w", err)
  }
  if cn != 1 {
- return cid.Undef, xerrors.Errorf("updating db record: expected 1 row to be affected, got %d", c)
+ return cid.Undef, xerrors.Errorf("updating db record: expected 1 row to be affected, got %d", cn)
  }

  log.Infow("sent message", "cid", sigMsg.Cid(), "from", fromA, "to", msg.To, "nonce", msg.Nonce, "value", msg.Value, "gaslimit", msg.GasLimit)
@@ -207,8 +207,9 @@ func (t *WdPostTask) DoPartition(ctx context.Context, ts *types.TipSet, maddr ad
  time.Sleep(5 * time.Second)
  continue todo retry loop */
  } else if !correct {
+ _ = correct
  /*log.Errorw("generated incorrect window post proof", "post", postOut, "error", err)
- continue todo retry loop */
+ continue todo retry loop*/
  }

  // Proof generation successful, stop retrying
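With the log/continue handling parked in a comment, the else-if body here was left empty, which staticcheck-style "empty branch" lints complain about (the exact check is an inference; the diff only shows the fix). The no-op `_ = correct` gives the branch a statement while keeping the original handling in the comment for later. A sketch of the idiom:

```go
package main

import "fmt"

func check(correct bool) {
	if correct {
		fmt.Println("proof ok")
	} else if !correct {
		// An empty branch here trips "empty branch" lints.
		// A deliberate no-op keeps the branch non-empty while the real
		// handling stays commented out until retries are implemented.
		_ = correct
		/* log.Errorw("generated incorrect proof")
		   retry */
	}
}

func main() { check(false) }
```

The next hunk takes the other route for the same class of warning: the empty else branch in sectorsForProof is folded entirely into the comment instead.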
@@ -322,11 +323,11 @@ func (t *WdPostTask) sectorsForProof(ctx context.Context, maddr address.Address,
  if err := allSectors.ForEach(func(sectorNo uint64) error {
  if info, found := sectorByID[sectorNo]; found {
  proofSectors = append(proofSectors, info)
- } else {
- //skip
- // todo: testing: old logic used to put 'substitute' sectors here
- // that probably isn't needed post nv19, but we do need to check that
- }
+ } //else {
+ //skip
+ // todo: testing: old logic used to put 'substitute' sectors here
+ // that probably isn't needed post nv19, but we do need to check that
+ //}
  return nil
  }); err != nil {
  return nil, xerrors.Errorf("iterating partition sector bitmap: %w", err)
@@ -74,10 +74,10 @@ type WdPostTask struct {
  }

  type wdTaskIdentity struct {
- Sp_id uint64
- Proving_period_start abi.ChainEpoch
- Deadline_index uint64
- Partition_index uint64
+ SpID uint64
+ ProvingPeriodStart abi.ChainEpoch
+ DeadlineIndex uint64
+ PartitionIndex uint64
  }

  func NewWdPostTask(db *harmonydb.DB,
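Renaming Sp_id, Proving_period_start, Deadline_index and Partition_index to SpID, ProvingPeriodStart, DeadlineIndex and PartitionIndex satisfies Go naming lints (revive/golint flag underscores in identifiers and lower-cased initialisms such as Id). The INSERT statements later in this commit still reference the snake_case SQL columns explicitly, so only the Go-side names change; how the harmonydb scanner matches columns like proving_period_start to fields like ProvingPeriodStart is not shown in these hunks. A small sketch of the naming rule, with hypothetical struct names:

```go
package main

import "fmt"

// Underscored names like Sp_id trip Go naming lints (revive/golint var-naming):
// Go identifiers use MixedCaps, and initialisms such as ID stay upper-case.
type taskIdentityOld struct {
	Sp_id          uint64 // flagged
	Deadline_index uint64 // flagged
}

type taskIdentityNew struct {
	SpID          uint64
	DeadlineIndex uint64
}

func main() {
	fmt.Println(taskIdentityNew{SpID: 1000, DeadlineIndex: 3})
}
```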
@@ -206,11 +206,11 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng

  // GetData for tasks
  type wdTaskDef struct {
- Task_id harmonytask.TaskID
- Sp_id uint64
- Proving_period_start abi.ChainEpoch
- Deadline_index uint64
- Partition_index uint64
+ TaskID harmonytask.TaskID
+ SpID uint64
+ ProvingPeriodStart abi.ChainEpoch
+ DeadlineIndex uint64
+ PartitionIndex uint64

  dlInfo *dline.Info `pgx:"-"`
  openTs *types.TipSet
@@ -232,10 +232,10 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng

  // Accept those past deadline, then delete them in Do().
  for i := range tasks {
- tasks[i].dlInfo = wdpost.NewDeadlineInfo(tasks[i].Proving_period_start, tasks[i].Deadline_index, ts.Height())
+ tasks[i].dlInfo = wdpost.NewDeadlineInfo(tasks[i].ProvingPeriodStart, tasks[i].DeadlineIndex, ts.Height())

  if tasks[i].dlInfo.PeriodElapsed() {
- return &tasks[i].Task_id, nil
+ return &tasks[i].TaskID, nil
  }

  tasks[i].openTs, err = t.api.ChainGetTipSetAfterHeight(context.Background(), tasks[i].dlInfo.Open, ts.Key())
@@ -281,7 +281,7 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng
  var r int
  err := t.db.QueryRow(context.Background(), `SELECT COUNT(*)
  FROM harmony_task_history
- WHERE task_id = $1 AND result = false`, d.Task_id).Scan(&r)
+ WHERE task_id = $1 AND result = false`, d.TaskID).Scan(&r)
  if err != nil {
  log.Errorf("WdPostTask.CanAccept() failed to queryRow: %v", err)
  }
@@ -293,7 +293,7 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng
  return tasks[i].dlInfo.Open < tasks[j].dlInfo.Open
  })

- return &tasks[0].Task_id, nil
+ return &tasks[0].TaskID, nil
  }

  var res = storiface.ResourceTable[sealtasks.TTGenerateWindowPoSt]
@@ -353,10 +353,10 @@ func (t *WdPostTask) processHeadChange(ctx context.Context, revert, apply *types

  for pidx := range partitions {
  tid := wdTaskIdentity{
- Sp_id: aid,
- Proving_period_start: di.PeriodStart,
- Deadline_index: di.Index,
- Partition_index: uint64(pidx),
+ SpID: aid,
+ ProvingPeriodStart: di.PeriodStart,
+ DeadlineIndex: di.Index,
+ PartitionIndex: uint64(pidx),
  }

  tf := t.windowPoStTF.Val(ctx)
@@ -384,10 +384,10 @@ func (t *WdPostTask) addTaskToDB(taskId harmonytask.TaskID, taskIdent wdTaskIden
  partition_index
  ) VALUES ($1, $2, $3, $4, $5)`,
  taskId,
- taskIdent.Sp_id,
- taskIdent.Proving_period_start,
- taskIdent.Deadline_index,
- taskIdent.Partition_index,
+ taskIdent.SpID,
+ taskIdent.ProvingPeriodStart,
+ taskIdent.DeadlineIndex,
+ taskIdent.PartitionIndex,
  )
  if err != nil {
  return false, xerrors.Errorf("insert partition task: %w", err)
@@ -166,7 +166,7 @@ func (w *WdPostRecoverDeclareTask) Do(taskID harmonytask.TaskID, stillOwned func

  recDecl := miner.RecoveryDeclaration{
  Deadline: dlIdx,
- Partition: uint64(partIdx),
+ Partition: partIdx,
  Sectors: recovered,
  }
@@ -187,6 +187,9 @@ func (w *WdPostRecoverDeclareTask) Do(taskID harmonytask.TaskID, stillOwned func
  }

  msg, mss, err := preparePoStMessage(w.api, w.as, maddr, msg, abi.TokenAmount(w.maxDeclareRecoveriesGasFee))
+ if err != nil {
+ return false, xerrors.Errorf("sending declare recoveries message: %w", err)
+ }

  mc, err := w.sender.Send(ctx, msg, mss, "declare-recoveries")
  if err != nil {
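This hunk adds the missing error check after preparePoStMessage (which lines are additions is reconstructed from the 6→9 line counts in the hunk header). Unchecked errors are the classic errcheck finding; the fix wraps the cause with %w so callers can still unwrap it. A generic sketch of the pattern using xerrors as this codebase does, with made-up helper names:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/xerrors"
)

var errNoFunds = errors.New("no funds")

// prepareMessage is a hypothetical stand-in for preparePoStMessage.
func prepareMessage(ok bool) (string, error) {
	if !ok {
		return "", errNoFunds
	}
	return "msg", nil
}

func send(ok bool) error {
	msg, err := prepareMessage(ok)
	if err != nil {
		// Wrap with %w so errors.Is/As still see the underlying cause.
		return xerrors.Errorf("sending declare recoveries message: %w", err)
	}
	fmt.Println("sending", msg)
	return nil
}

func main() {
	if err := send(false); err != nil {
		fmt.Println(errors.Is(err, errNoFunds), err) // true, wrapped message
	}
}
```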
@@ -279,10 +282,10 @@ func (w *WdPostRecoverDeclareTask) processHeadChange(ctx context.Context, revert
  }

  tid := wdTaskIdentity{
- Sp_id: aid,
- Proving_period_start: pps,
- Deadline_index: declDeadline,
- Partition_index: uint64(pidx),
+ SpID: aid,
+ ProvingPeriodStart: pps,
+ DeadlineIndex: declDeadline,
+ PartitionIndex: uint64(pidx),
  }

  tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) {
@@ -304,10 +307,10 @@ func (w *WdPostRecoverDeclareTask) addTaskToDB(taskId harmonytask.TaskID, taskId
  partition_index
  ) VALUES ($1, $2, $3, $4, $5)`,
  taskId,
- taskIdent.Sp_id,
- taskIdent.Proving_period_start,
- taskIdent.Deadline_index,
- taskIdent.Partition_index,
+ taskIdent.SpID,
+ taskIdent.ProvingPeriodStart,
+ taskIdent.DeadlineIndex,
+ taskIdent.PartitionIndex,
  )
  if err != nil {
  return false, xerrors.Errorf("insert partition task: %w", err)
@@ -257,7 +257,7 @@ func preparePoStMessage(w MsgPrepAPI, as *ctladdr.AddressSelector, maddr address
  msg.From = mi.Worker

  mss := &api.MessageSendSpec{
- MaxFee: abi.TokenAmount(maxFee),
+ MaxFee: maxFee,
  }

  // (optimal) initial estimation with some overestimation that guarantees
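The `Partition: uint64(partIdx)` change above and the `MaxFee: abi.TokenAmount(maxFee)` change in this last hunk both drop a conversion to a type the value already has, which the unconvert linter reports. The declared types of partIdx and maxFee aren't visible in these hunks, but the change only type-checks if they already matched. A tiny illustration with a stand-in type:

```go
package main

import "fmt"

type tokenAmount int64 // stand-in for abi.TokenAmount, which is a bigger type in the real code

func main() {
	var maxFee tokenAmount = 500

	// unconvert: tokenAmount(maxFee) converts a tokenAmount to itself.
	fee := tokenAmount(maxFee) // flagged as redundant
	fee = maxFee               // equivalent, lint-clean

	fmt.Println(fee)
}
```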