Address simple linter issues

Ian Davis 2022-11-24 16:32:27 +00:00
parent ca3b7c3bcf
commit 9f85d3dca7
15 changed files with 25 additions and 30 deletions

View File

@ -24,6 +24,8 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
@ -456,7 +458,8 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} {
if f.Type == parent {
continue
}
if strings.Title(f.Name) == f.Name {
caser := cases.Title(language.English)
if caser.String(f.Name) == f.Name {
ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
}
}
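The pattern behind this hunk: strings.Title was deprecated in Go 1.18, and golang.org/x/text/cases is the suggested replacement, which is presumably what the linter flagged here. A minimal standalone sketch of the new API (the input word is illustrative):

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// cases.Title replaces the deprecated strings.Title; the caser is built
	// once and reused, as the hunk above does for struct field names.
	caser := cases.Title(language.English)
	fmt.Println(caser.String("exported")) // "Exported"
}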

View File

@ -29,7 +29,7 @@ func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (t
// >>> wFunction(totalPowerAtTipset(ts)) * 2^8 <<< + (wFunction(totalPowerAtTipset(ts)) * sum(ts.blocks[].ElectionProof.WinCount) * wRatio_num * 2^8) / (e * wRatio_den)
tpow := big2.Zero()
var tpow big2.Int
{
cst := cbor.NewCborStore(stateBs)
state, err := state.LoadStateTree(cst, ts.ParentState())
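This hunk, like the refundValue changes further down, follows the same lint pattern: the zero-constructor's return value is presumably never read because the variable is overwritten before use, so the initializer is replaced with a plain var declaration. A rough sketch of the idea, with math/big standing in for the chain-specific big-integer package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// `tpow := big.NewInt(0)` followed by an unconditional overwrite would be
	// an ineffectual assignment; declaring the variable and writing into it
	// once avoids the dead store.
	var tpow big.Int
	tpow.Add(big.NewInt(2), big.NewInt(3))
	fmt.Println(tpow.String()) // 5
}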

View File

@ -105,7 +105,7 @@ func TestFundManagerBasic(t *testing.T) {
// Note: Expect failure because there is no available balance to withdraw:
// balance - reserved = 16 - 16 = 0
amt = abi.NewTokenAmount(1)
sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
_, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
require.Error(t, err)
}
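This test hunk, and the retry.Retry and LoadTree changes later in the commit, apply the other common fix for unused-result warnings: assign the value that is never read to the blank identifier. A small self-contained sketch (divmod is an illustrative helper):

package main

import "fmt"

func divmod(a, b int) (quot, rem int) {
	return a / b, a % b
}

func main() {
	// Only the remainder is used, so the quotient goes to `_`, which silences
	// the unused-variable warning without changing behaviour.
	_, rem := divmod(7, 3)
	fmt.Println(rem) // 1
}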

View File

@ -274,7 +274,7 @@ func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
}
total := len(rins)
result, err := retry.Retry(ctx, 5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
result, _ := retry.Retry(ctx, 5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
curr = (curr + 1) % total
result := fns[curr].Call(args)

View File

@ -898,7 +898,7 @@ func (r *refunder) EnsureMinerMinimums(ctx context.Context, tipset *types.TipSet
func (r *refunder) processTipsetStorageMarketActor(ctx context.Context, tipset *types.TipSet, msg api.Message, recp *types.MessageReceipt) (bool, string, types.BigInt, error) {
m := msg.Message
refundValue := types.NewInt(0)
var refundValue types.BigInt
var messageMethod string
switch m.Method {
@ -925,7 +925,7 @@ func (r *refunder) processTipsetStorageMarketActor(ctx context.Context, tipset *
func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *types.TipSet, msg api.Message, recp *types.MessageReceipt) (bool, string, types.BigInt, error) {
m := msg.Message
refundValue := types.NewInt(0)
var refundValue types.BigInt
var messageMethod string
if _, found := r.blockmap[m.To]; found {

View File

@ -36,7 +36,7 @@ type Option uint64
const (
Approve Option = 49
Reject = 50
Reject Option = 50
)
type Vote struct {
@ -148,7 +148,7 @@ var finalResultCmd = &cli.Command{
}
votes, err := getVotesMap(vj)
if err != nil {
return xerrors.Errorf("failed to get voters: ", err)
return xerrors.Errorf("failed to get voters: %w\n", err)
}
type minerBriefInfo struct {
@ -160,23 +160,23 @@ var finalResultCmd = &cli.Command{
// power actor
pa, err := st.GetActor(power.Address)
if err != nil {
return xerrors.Errorf("failed to get power actor: \n", err)
return xerrors.Errorf("failed to get power actor: %w\n", err)
}
powerState, err := power.Load(store, pa)
if err != nil {
return xerrors.Errorf("failed to get power state: \n", err)
return xerrors.Errorf("failed to get power state: %w\n", err)
}
//market actor
ma, err := st.GetActor(market.Address)
if err != nil {
return xerrors.Errorf("fail to get market actor: ", err)
return xerrors.Errorf("fail to get market actor: %w\n", err)
}
marketState, err := market.Load(store, ma)
if err != nil {
return xerrors.Errorf("fail to load market state: ", err)
return xerrors.Errorf("fail to load market state: %w\n", err)
}
lookupId := func(addr address.Address) address.Address {
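The Errorf changes in this file all add the format verb the original calls were missing; the printf vet check reports an error argument with no corresponding verb, and %w additionally keeps the wrapped error inspectable. A minimal sketch (message and sentinel error are illustrative):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/xerrors"
)

func main() {
	cause := errors.New("actor not found")
	// With no verb, vet flags the unused argument (fmt would render it as a
	// %!(EXTRA ...) suffix); with %w the cause stays reachable via errors.Is.
	err := xerrors.Errorf("failed to get power actor: %w", cause)
	fmt.Println(err)                   // failed to get power actor: actor not found
	fmt.Println(errors.Is(err, cause)) // true
}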

View File

@ -240,7 +240,7 @@ func checkMigrationInvariants(ctx context.Context, v8StateRootCid cid.Cid, v9Sta
return err
}
v9actorTree, err := builtin.LoadTree(actorStore, v9stateRoot.Actors)
v9actorTree, _ := builtin.LoadTree(actorStore, v9stateRoot.Actors)
messages, err := v9.CheckStateInvariants(v9actorTree, epoch, actorCodeCids)
if err != nil {
return xerrors.Errorf("checking state invariants: %w", err)
@ -465,7 +465,7 @@ func compareProposalToAllocation(prop market8.DealProposal, alloc verifreg9.Allo
return xerrors.Errorf("couldnt get ID from address")
}
if proposalClientID != uint64(alloc.Client) {
return xerrors.Errorf("client id mismatch between proposal and allocation: %s, %s", proposalClientID, alloc.Client)
return xerrors.Errorf("client id mismatch between proposal and allocation: %v, %v", proposalClientID, alloc.Client)
}
proposalProviderID, err := address.IDFromAddress(prop.Provider)
@ -473,11 +473,11 @@ func compareProposalToAllocation(prop market8.DealProposal, alloc verifreg9.Allo
return xerrors.Errorf("couldnt get ID from address")
}
if proposalProviderID != uint64(alloc.Provider) {
return xerrors.Errorf("provider id mismatch between proposal and allocation: %s, %s", proposalProviderID, alloc.Provider)
return xerrors.Errorf("provider id mismatch between proposal and allocation: %v, %v", proposalProviderID, alloc.Provider)
}
if prop.PieceSize != alloc.Size {
return xerrors.Errorf("piece size mismatch between proposal and allocation: %s, %s", prop.PieceSize, alloc.Size)
return xerrors.Errorf("piece size mismatch between proposal and allocation: %v, %v", prop.PieceSize, alloc.Size)
}
if alloc.TermMax != 540*builtin.EpochsInDay {
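The verb changes in this hunk address the complementary printf warning: %s applied to values that are neither strings nor fmt.Stringers, such as the uint64-based IDs and sizes compared here; %v accepts any value. A short sketch with illustrative IDs:

package main

import "fmt"

func main() {
	// %s on a uint64 renders as %!s(uint64=1001) and is flagged by vet;
	// %v falls back to the default format for the value.
	var proposalClientID uint64 = 1001
	var allocationClient uint64 = 1002
	fmt.Printf("client id mismatch between proposal and allocation: %v, %v\n",
		proposalClientID, allocationClient)
}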

View File

@ -38,8 +38,6 @@ import (
)
func TestMigrationNV17(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
rootKey, err := key.GenerateKey(types.KTSecp256k1)

View File

@ -31,8 +31,6 @@ import (
)
func TestGetAllocationForPendingDeal(t *testing.T) {
ctx := context.Background()
rootKey, err := key.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)

View File

@ -79,7 +79,6 @@ func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *ki
node.Override(node.GoRPCServer, modules.NewRPCServer),
)
//raftOps := kit.ConstructorOpts()
kit.ThroughRPC()
ens := kit.NewEnsemble(t).FullNode(node0, raftOps, kit.ThroughRPC()).FullNode(node1, raftOps, kit.ThroughRPC()).FullNode(node2, raftOps, kit.ThroughRPC())
node0.AssignPrivKey(pkey0)
@ -527,7 +526,6 @@ func TestGoRPCAuth(t *testing.T) {
node.Override(node.GoRPCServer, modules.NewRPCServer),
)
//raftOps := kit.ConstructorOpts()
kit.ThroughRPC()
ens := kit.NewEnsemble(t).FullNode(&node0, raftOps, kit.ThroughRPC()).FullNode(&node1, raftOps, kit.ThroughRPC()).FullNode(&node2, raftOps, kit.ThroughRPC()).FullNode(&node3, raftOps)
node0.AssignPrivKey(pkey0)

View File

@ -29,8 +29,6 @@ import (
)
func TestNoRemoveDatacapFromVerifreg(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
rootKey, err := key.GenerateKey(types.KTSecp256k1)

View File

@ -151,7 +151,7 @@ func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.Mes
consensus.SetActor(actor)
peers := []peer.ID{}
addrInfos, err := addrutil.ParseAddresses(ctx, cfg.InitPeerset)
addrInfos, _ := addrutil.ParseAddresses(ctx, cfg.InitPeerset)
for _, addrInfo := range addrInfos {
peers = append(peers, addrInfo.ID)
@ -422,7 +422,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
return err
}
// Being here means we are the leader and can commit
finalErr = cc.raft.RemovePeer(ctx, peer.Encode(pid))
finalErr = cc.raft.RemovePeer(ctx, pid.String())
if finalErr != nil {
time.Sleep(cc.config.CommitRetryDelay)
continue

View File

@ -80,7 +80,7 @@ func newRaftWrapper(
raftW.staging = staging
raftW.repo = repo
// Set correct LocalID
cfg.RaftConfig.LocalID = hraft.ServerID(peer.Encode(host.ID()))
cfg.RaftConfig.LocalID = hraft.ServerID(host.ID().String())
df := cfg.GetDataFolder(repo)
err := makeDataFolder(df)
@ -248,7 +248,7 @@ func makeServerConf(peers []peer.ID) hraft.Configuration {
// Servers are peers + self. We avoid duplicate entries below
for _, pid := range peers {
p := peer.Encode(pid)
p := pid.String()
_, ok := sm[p]
if !ok { // avoid dups
sm[p] = struct{}{}
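Both raft files switch from peer.Encode(pid) to pid.String(), which newer go-libp2p releases prefer now that peer.Encode is deprecated; the two forms produce the same text representation. A sketch that derives a peer ID from a throwaway key (the key type and usage are illustrative):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Generate a throwaway Ed25519 key just to obtain a peer ID.
	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		panic(err)
	}
	pid, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	// pid.String() replaces the deprecated peer.Encode(pid).
	fmt.Println(pid.String())
}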

View File

@ -7,11 +7,11 @@ import (
nilrouting "github.com/ipfs/go-ipfs-routing/none"
"github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"go.uber.org/fx"

View File

@ -90,7 +90,7 @@ func ExtractTar(body io.Reader, dir string, buf []byte) (int64, error) {
sz, found := CacheFileConstraints[header.Name]
if !found {
return read, xerrors.Errorf("tar file %#v isn't expected")
return read, xerrors.Errorf("tar file %#v isn't expected", header.Name)
}
if header.Size > sz {
return read, xerrors.Errorf("tar file %#v is bigger than expected: %d > %d", header.Name, header.Size, sz)