add go linter - "unused" (#11235)
* add go linter - "unused"
* use _ to name unused but needed padding variable
* remove unused code
* add queue test to appease unused linter
* remove unused code in test
* remove unused func
* remove unused struct identified by linter
* remove unused variable
* remove unused code
* remove unused file
* remove unused struct
* remove unused function
* remove unused observe peers function in raft
* remove unused declareFaults function
* annotate nolint:unused on needed methods
parent ef9a9175a1
commit 5e5a81bf23
@@ -14,6 +14,7 @@ linters:
- varcheck
- deadcode
- scopelint
- unused

# We don't want to skip builtin/
skip-dirs-use-default: false
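For illustration only, the sketch below (hypothetical package and function names, not part of this commit) shows the kind of dead code the newly enabled `unused` checker reports on a plain `golangci-lint run`, and how the `//nolint:unused` directive used later in this commit keeps a deliberately unused declaration:

```go
package example

// neverCalled has no callers in this package, so the enabled "unused"
// linter flags it as dead code.
func neverCalled() int { return 1 }

// keptForLater also has no callers, but the directive above the declaration
// tells golangci-lint to skip the unused check for it, mirroring the
// annotations this commit adds to recordPoStFailure and onAbort.
//
//nolint:unused
func keptForLater() int { return 2 }
```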
@@ -182,7 +182,6 @@ type SplitStore struct {
    compactionIndex int64
    pruneIndex int64
    onlineGCCnt int64

    ctx context.Context
    cancel func()
@@ -29,19 +29,6 @@ import (
var log = logging.Logger("drand")

type drandPeer struct {
    addr string
    tls bool
}

func (dp *drandPeer) Address() string {
    return dp.addr
}

func (dp *drandPeer) IsTLS() bool {
    return dp.tls
}

// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
@@ -1,25 +0,0 @@
package state

import (
    "context"

    "github.com/ipfs/go-cid"
    cbor "github.com/ipfs/go-ipld-cbor"
)

type contextStore struct {
    ctx context.Context
    cst *cbor.BasicIpldStore
}

func (cs *contextStore) Context() context.Context {
    return cs.ctx
}

func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
    return cs.cst.Get(ctx, c, out)
}

func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
    return cs.cst.Put(ctx, v)
}
@@ -39,23 +39,6 @@ func (ps *Store) save(ctx context.Context, state *FundedAddressState) error {
    return ps.ds.Put(ctx, k, b)
}

// get the state for the given address
func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) {
    k := dskeyForAddr(addr)

    data, err := ps.ds.Get(ctx, k)
    if err != nil {
        return nil, err
    }

    var state FundedAddressState
    err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
    if err != nil {
        return nil, err
    }
    return &state, nil
}

// forEach calls iter with each address in the datastore
func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error {
    res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr})
@@ -232,11 +232,6 @@ func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) b
    return false
}

func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
    _, ok := sm.expensiveUpgrades[height]
    return ok
}

func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) {
    height := ts.Height()
    parent := ts.ParentState()
chain/sub/ratelimit/queue_test.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package ratelimit

import (
    "testing"
)

func TestQueue(t *testing.T) {
    const size = 3
    q := &queue{buf: make([]int64, size)}

    if q.len() != 0 {
        t.Fatalf("q.len() = %d, expect 0", q.len())
    }

    if q.cap() != size {
        t.Fatalf("q.cap() = %d, expect %d", q.cap(), size)
    }

    for i := int64(0); i < int64(size); i++ {
        err := q.push(i)
        if err != nil {
            t.Fatalf("cannot push element %d", i)
        }
    }

    if q.len() != size {
        t.Fatalf("q.len() = %d, expect %d", q.len(), size)
    }

    err := q.push(int64(size))
    if err != ErrRateLimitExceeded {
        t.Fatalf("pushing element beyond capacity should have failed with err: %s, got %s", ErrRateLimitExceeded, err)
    }

    if q.front() != 0 {
        t.Fatalf("q.front() = %d, expect 0", q.front())
    }

    if q.back() != int64(size-1) {
        t.Fatalf("q.back() = %d, expect %d", q.back(), size-1)
    }

    popVal := q.pop()
    if popVal != 0 {
        t.Fatalf("q.pop() = %d, expect 0", popVal)
    }

    if q.len() != size-1 {
        t.Fatalf("q.len() = %d, expect %d", q.len(), size-1)
    }

    // Testing truncation.
    threshold := int64(1)
    q.truncate(threshold)
    if q.len() != 1 {
        t.Fatalf("q.len() after truncate = %d, expect 1", q.len())
    }
    if q.front() != 2 {
        t.Fatalf("q.front() after truncate = %d, expect 2", q.front())
    }
}
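The queue type exercised here is defined elsewhere in the ratelimit package and is not part of this diff. For readers following along, below is a minimal ring-buffer sketch that would satisfy this test; the method set and error value are inferred from the assertions above, and the real queue.go may differ in detail:

```go
package ratelimit

import "errors"

// ErrRateLimitExceeded is assumed to be declared by the real package; it is
// redeclared here only so this sketch is self-contained.
var ErrRateLimitExceeded = errors.New("rate limit exceeded")

// queue is a fixed-capacity FIFO backed by a ring buffer of int64 values.
type queue struct {
    buf   []int64
    count int // number of stored elements
    head  int // index of the front element
    tail  int // index one past the back element
}

func (q *queue) cap() int { return len(q.buf) }
func (q *queue) len() int { return q.count }

// push appends v, failing once the buffer is full.
func (q *queue) push(v int64) error {
    if q.count == len(q.buf) {
        return ErrRateLimitExceeded
    }
    q.buf[q.tail] = v
    q.tail = (q.tail + 1) % len(q.buf)
    q.count++
    return nil
}

// pop removes and returns the front element.
func (q *queue) pop() int64 {
    v := q.buf[q.head]
    q.head = (q.head + 1) % len(q.buf)
    q.count--
    return v
}

func (q *queue) front() int64 { return q.buf[q.head] }

func (q *queue) back() int64 { return q.buf[(q.tail-1+len(q.buf))%len(q.buf)] }

// truncate drops elements from the front while they are <= threshold.
func (q *queue) truncate(threshold int64) {
    for q.count > 0 && q.buf[q.head] <= threshold {
        q.pop()
    }
}
```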
@@ -11,7 +11,6 @@ import (
    "github.com/ipfs/go-cid"
    ds "github.com/ipfs/go-datastore"
    logging "github.com/ipfs/go-log/v2"
    "github.com/libp2p/go-libp2p/core/peer"
    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
    "github.com/stretchr/testify/require"

@@ -344,13 +343,6 @@ func (tu *syncTestUtil) addClientNode() int {
    return len(tu.nds) - 1
}

func (tu *syncTestUtil) pid(n int) peer.ID {
    nal, err := tu.nds[n].NetAddrsListen(tu.ctx)
    require.NoError(tu.t, err)

    return nal.ID
}

func (tu *syncTestUtil) connect(from, to int) {
    toPI, err := tu.nds[to].NetAddrsListen(tu.ctx)
    require.NoError(tu.t, err)
@@ -7,9 +7,6 @@ import (
)

func TestDecodeBlockMsg(t *testing.T) {
    type args struct {
        b []byte
    }
    tests := []struct {
        name string
        data []byte
@@ -27,24 +27,3 @@ type StateTree interface {

    Version() StateTreeVersion
}

type storageWrapper struct {
    s Storage
}

func (sw *storageWrapper) Put(i cbg.CBORMarshaler) (cid.Cid, error) {
    c, err := sw.s.Put(i)
    if err != nil {
        return cid.Undef, err
    }

    return c, nil
}

func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error {
    if err := sw.s.Get(c, out); err != nil {
        return err
    }

    return nil
}
@@ -497,21 +497,6 @@ type Invocation struct {

const GasPerNs = 10

func countGasCosts(et *types.ExecutionTrace) int64 {
    var cgas int64

    for _, gc := range et.GasCharges {
        cgas += gc.ComputeGas
    }

    for _, sub := range et.Subcalls {
        c := countGasCosts(&sub) //nolint
        cgas += c
    }

    return cgas
}

type stats struct {
    timeTaken meanVar
    gasRatio meanVar
@@ -561,33 +561,3 @@ func find(s []string, elem string) bool {
    }
    return false
}

func (rw *raftWrapper) observePeers() {
    obsCh := make(chan hraft.Observation, 1)
    defer close(obsCh)

    observer := hraft.NewObserver(obsCh, true, func(o *hraft.Observation) bool {
        po, ok := o.Data.(hraft.PeerObservation)
        return ok && po.Removed
    })

    rw.raft.RegisterObserver(observer)
    defer rw.raft.DeregisterObserver(observer)

    for {
        select {
        case obs := <-obsCh:
            pObs := obs.Data.(hraft.PeerObservation)
            logger.Info("raft peer departed. Removing from peerstore: ", pObs.Peer.ID)
            pID, err := peer.Decode(string(pObs.Peer.ID))
            if err != nil {
                logger.Error(err)
                continue
            }
            rw.host.Peerstore().ClearAddrs(pID)
        case <-rw.ctx.Done():
            logger.Debug("stopped observing raft peers")
            return
        }
    }
}
@@ -11,8 +11,8 @@ const cacheline = 64
// name       old time/op  new time/op  delta
// Locks-8    74.6ns ± 7%  12.3ns ± 2%  -83.54%  (p=0.000 n=20+18)
type paddedMutex struct {
-    mt  sync.Mutex
-    pad [cacheline - 8]uint8
+    mt sync.Mutex
+    _  [cacheline - 8]uint8
}

type ShardedMutex struct {
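The padding field exists only to push each mutex onto its own cache line; renaming it to the blank identifier `_` keeps the layout while giving the `unused` linter no named field to flag. A small self-contained sketch of the pattern (the size printed assumes sync.Mutex occupies 8 bytes, as it does in current Go releases):

```go
package main

import (
    "fmt"
    "sync"
    "unsafe"
)

const cacheline = 64

// The blank field pads the struct to a full cache line so adjacent mutexes
// in a slice do not share a line (avoiding false sharing), and the unused
// linter has nothing named to complain about.
type paddedMutex struct {
    mt sync.Mutex
    _  [cacheline - 8]uint8
}

func main() {
    // Typically prints 64 (8-byte sync.Mutex plus 56 bytes of padding).
    fmt.Println(unsafe.Sizeof(paddedMutex{}))
}
```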
@@ -126,8 +126,6 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo
        MaxFilterResults: cfg.Events.MaxFilterResults,
    }

    const ChainHeadConfidence = 1

    lc.Append(fx.Hook{
        OnStart: func(context.Context) error {
            ev, err := events.NewEvents(ctx, &evapi)
@@ -805,26 +805,6 @@ func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, non
    return sv
}

func createTestVoucherWithExtra(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paychtypes.SignedVoucher { //nolint:deadcode
    sv := &paychtypes.SignedVoucher{
        ChannelAddr: ch,
        Lane: voucherLane,
        Nonce: nonce,
        Amount: voucherAmount,
        Extra: &paychtypes.ModVerifyParams{
            Actor: tutils.NewActorAddr(t, "act"),
        },
    }

    signingBytes, err := sv.SigningBytes()
    require.NoError(t, err)
    sig, err := sigs.Sign(crypto.SigTypeSecp256k1, key, signingBytes)
    require.NoError(t, err)
    sv.Signature = sig

    return sv
}

type mockBestSpendableAPI struct {
    mgr *Manager
}
@@ -32,6 +32,8 @@ import (
)

// recordPoStFailure records a failure in the journal.
//
//nolint:unused
func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) {
    s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
        c := evtCommon{Error: err}
@@ -195,106 +195,6 @@ func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint6
    return batchedRecoveryDecls, msgs, nil
}

// declareFaults identifies the sectors on the specified proving deadline that
// are faulty, and reports the faults on chain via the `DeclareFaults` message
// to our miner actor.
//
// NOTE: THIS CODE ISN'T INVOKED AFTER THE IGNITION UPGRADE
// This is always invoked ahead of time, before the deadline for the evaluated
// sectors arrives. That way, faults are declared before a penalty is accrued.
//
// If a declaration is made, it awaits for build.MessageConfidence confirmations
// on chain before returning.
//
// TODO: the waiting should happen in the background. Right now this
//
// is blocking/delaying the actual generation and submission of WindowPoSts in
// this deadline!
func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
    ctx, span := trace.StartSpan(ctx, "storage.declareFaults")
    defer span.End()

    bad := uint64(0)
    params := &miner.DeclareFaultsParams{
        Faults: []miner.FaultDeclaration{},
    }

    for partIdx, partition := range partitions {
        nonFaulty, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
        if err != nil {
            return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
        }

        good, err := s.checkSectors(ctx, nonFaulty, tsk)
        if err != nil {
            return nil, nil, xerrors.Errorf("checking sectors: %w", err)
        }

        newFaulty, err := bitfield.SubtractBitField(nonFaulty, good)
        if err != nil {
            return nil, nil, xerrors.Errorf("calculating faulty sector set: %w", err)
        }

        c, err := newFaulty.Count()
        if err != nil {
            return nil, nil, xerrors.Errorf("counting faulty sectors: %w", err)
        }

        if c == 0 {
            continue
        }

        bad += c

        params.Faults = append(params.Faults, miner.FaultDeclaration{
            Deadline: dlIdx,
            Partition: uint64(partIdx),
            Sectors: newFaulty,
        })
    }

    faults := params.Faults
    if len(faults) == 0 {
        return faults, nil, nil
    }

    log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)

    enc, aerr := actors.SerializeParams(params)
    if aerr != nil {
        return faults, nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
    }

    msg := &types.Message{
        To: s.actor,
        Method: builtin.MethodsMiner.DeclareFaults,
        Params: enc,
        Value: types.NewInt(0), // TODO: Is there a fee?
    }
    spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
    if err := s.prepareMessage(ctx, msg, spec); err != nil {
        return faults, nil, err
    }

    sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
    if err != nil {
        return faults, sm, xerrors.Errorf("pushing message to mpool: %w", err)
    }

    log.Warnw("declare faults Message CID", "cid", sm.Cid())

    rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
    if err != nil {
        return faults, sm, xerrors.Errorf("declare faults wait error: %w", err)
    }

    if rec.Receipt.ExitCode != 0 {
        return faults, sm, xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
    }

    return faults, sm, nil
}

func (s *WindowPoStScheduler) asyncFaultRecover(di dline.Info, ts *types.TipSet) {
    go func() {
        // check faults / recoveries for the *next* deadline. It's already too
@@ -225,6 +225,8 @@ func (s *WindowPoStScheduler) update(ctx context.Context, revert, apply *types.T
}

// onAbort is called when generating proofs or submitting proofs is aborted
//
//nolint:unused
func (s *WindowPoStScheduler) onAbort(ts *types.TipSet, deadline *dline.Info) {
    s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
        c := evtCommon{}