Merge pull request #3183 from filecoin-project/steb/lint-all

Lint everything
Łukasz Magiera 2020-08-22 21:47:41 +02:00 committed by GitHub
commit 0806fd651a
95 changed files with 372 additions and 347 deletions

View File

@@ -288,9 +288,6 @@ jobs:
       command: |
         $HOME/.local/bin/golangci-lint run -v --timeout 2m \
           --concurrency << parameters.concurrency >> << parameters.args >>
-  lint-changes:
-    <<: *lint
   lint-all:
     <<: *lint
@@ -319,8 +316,7 @@ workflows:
   version: 2.1
   ci:
     jobs:
-      - lint-changes:
-          args: "--new-from-rev origin/next"
+      - lint-all
       - mod-tidy-check
       - gofmt
      - test:

View File

@@ -23,6 +23,14 @@ issues:
     - "Potential file inclusion via variable"
     - "should have( a package)? comment"
     - "Error return value of `logging.SetLogLevel` is not checked"
+    - "comment on exported"
+    - "(func|method) \\w+ should be \\w+"
+    - "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
+    - "(G306|G301|G307|G108|G302|G204|G104)"
+    - "don't use ALL_CAPS in Go names"
+    - "string .* has .* occurrences, make it a constant"
+    - "a blank import should be only in a main or test package, or have a comment justifying it"
+    - "package comment should be of the form"
   exclude-use-default: false
   exclude-rules:
@@ -46,6 +54,19 @@ issues:
       linters:
         - gosec
+    - path: chain/vectors/gen/.*
+      linters:
+        - gosec
+    - path: cmd/lotus-bench/.*
+      linters:
+        - gosec
+    - path: api/test/.*
+      text: "context.Context should be the first parameter"
+      linters:
+        - golint
 linters-settings:
   goconst:
     min-occurrences: 6
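
Note: the new `issues.exclude` entries are regular expressions matched against linter messages; any finding whose text matches is suppressed repo-wide. As a hedged illustration (hypothetical code, not from this repository), the snippet below would normally trip two of the newly excluded checks: golint's ALL_CAPS naming warning and, with enough repetitions, goconst's "make it a constant" warning.

```go
// Hypothetical example of findings the new exclude patterns silence.
package example

import "fmt"

const DEFAULT_TIMEOUT_SECS = 30 // golint: "don't use ALL_CAPS in Go names"

func Describe() {
	// goconst would flag repeated string literals once they exceed
	// the configured min-occurrences threshold (6 in this config).
	fmt.Println("sector state: proving")
	fmt.Println("sector state: proving")
}
```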

View File

@@ -26,7 +26,7 @@ import (
     "github.com/ipfs/go-cid"
     "github.com/ipfs/go-filestore"
     "github.com/libp2p/go-libp2p-core/network"
-    peer "github.com/libp2p/go-libp2p-peer"
+    "github.com/libp2p/go-libp2p-core/peer"
     "github.com/multiformats/go-multiaddr"
 )
@@ -66,7 +66,7 @@ func init() {
     ExampleValues[reflect.TypeOf(addr)] = addr
-    pid, err := peer.IDB58Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
+    pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
     if err != nil {
         panic(err)
     }
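
Note: `peer.IDB58Decode` from the deprecated go-libp2p-peer package is replaced here by `peer.Decode` from go-libp2p-core/peer, which accepts both base58- and CID-encoded peer IDs. A minimal sketch of the replacement call (error handling shown with a plain check rather than the panic used in the init above):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	// peer.Decode parses a string-encoded peer ID (base58 or CID form).
	pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
	if err != nil {
		fmt.Println("invalid peer ID:", err)
		return
	}
	fmt.Println("decoded peer ID:", pid.Pretty())
}
```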

View File

@@ -141,7 +141,7 @@ func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNod
     info, err := client.ClientGetDealInfo(ctx, *deal)
     require.NoError(t, err)
-    testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, carExport, data)
+    testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
 }
 func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
@@ -193,7 +193,7 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati
     info, err := client.ClientGetDealInfo(ctx, *deal)
     require.NoError(t, err)
-    testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, false, data)
+    testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
     atomic.AddInt64(&mine, -1)
     fmt.Println("shutting down mining")
     <-done
@@ -267,7 +267,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
     rf, _ := miner.SectorsRefs(ctx)
     fmt.Printf("refs: %+v\n", rf)
-    testRetrieval(t, ctx, err, client, fcid2, &info.PieceCID, false, data2)
+    testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
 }
     atomic.AddInt64(&mine, -1)
@@ -373,7 +373,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
     }
 }
-func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
+func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
     offers, err := client.ClientFindData(ctx, fcid, piece)
     if err != nil {
         t.Fatal(err)

View File

@@ -20,6 +20,7 @@ import (
     "github.com/filecoin-project/lotus/node/impl"
 )
+//nolint:deadcode,varcheck
 var log = logging.Logger("apitest")
 func (ts *testSuite) testMining(t *testing.T) {
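
Note: `//nolint` comments are golangci-lint directives. Placed on the line above a declaration they suppress the named linters for that declaration only; placed at the end of a line with no linter list they silence every linter for that line. A hedged sketch of the two placements, with hypothetical names:

```go
// Hypothetical example of golangci-lint suppression comments.
package example

// Directive on its own line: applies to the declaration that follows,
// and only for the listed linters.
//nolint:deadcode,varcheck
var unusedMarker = "example"

func publish(send func(string) error) {
	// Trailing directive with no linter list: silences every linter on
	// this line (here, errcheck for the intentionally dropped error).
	send("hello") //nolint
}
```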

View File

@@ -153,6 +153,9 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
     }, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
         return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
     })
+    if err != nil {
+        t.Fatal(err)
+    }
     select {
     case <-finished:

View File

@@ -24,9 +24,14 @@ import (
     "github.com/filecoin-project/lotus/node/impl"
 )
-func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
-    os.Setenv("BELLMAN_NO_GPU", "1")
+func init() {
+    err := os.Setenv("BELLMAN_NO_GPU", "1")
+    if err != nil {
+        panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
+    }
+}
+
+func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
     ctx := context.Background()
     n, sn := b(t, 1, OneMiner)
     client := n[0].FullNode.(*impl.FullNodeAPI)
@@ -110,8 +115,6 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
 }
 func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
-    os.Setenv("BELLMAN_NO_GPU", "1")
     ctx := context.Background()
     n, sn := b(t, 1, OneMiner)
     client := n[0].FullNode.(*impl.FullNodeAPI)

View File

@@ -7,8 +7,8 @@ import (
     "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/types"
-    "github.com/hashicorp/golang-lru"
-    peer "github.com/libp2p/go-libp2p-core/peer"
+    lru "github.com/hashicorp/golang-lru"
+    "github.com/libp2p/go-libp2p-core/peer"
 )
 type blockReceiptTracker struct {

View File

@@ -172,7 +172,7 @@ func (client *BlockSync) processResponse(
             resLength, req.Length)
     }
     if resLength < int(req.Length) && res.Status != Partial {
-        return nil, xerrors.Errorf("got less than requested without a proper status: %s", res.Status)
+        return nil, xerrors.Errorf("got less than requested without a proper status: %d", res.Status)
     }
     validRes := &validatedResponse{}
@@ -205,7 +205,7 @@ func (client *BlockSync) processResponse(
         validRes.messages = make([]*CompactedMessages, resLength)
         for i := 0; i < resLength; i++ {
             if res.Chain[i].Messages == nil {
-                return nil, xerrors.Errorf("no messages included for tipset at height (head - %d): %w", i)
+                return nil, xerrors.Errorf("no messages included for tipset at height (head - %d)", i)
             }
             validRes.messages[i] = res.Chain[i].Messages
         }
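
Note: both fixes here are printf-style findings of the kind go vet and golangci-lint report: the first used `%s` for a numeric status value, the second declared `%w` with no error argument to wrap, so the verb was dropped. A minimal sketch of the two correct usages with xerrors (values are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

func main() {
	status := 206 // hypothetical numeric status code

	// Numeric values take %d; %s on an int prints a vet warning and "%!s(int=206)".
	errA := xerrors.Errorf("got less than requested without a proper status: %d", status)

	// %w must be paired with an error argument; it wraps it for errors.Is/As.
	cause := fmt.Errorf("boom")
	errB := xerrors.Errorf("failed to cbor marshal address: %w", cause)

	fmt.Println(errA)
	fmt.Println(xerrors.Is(errB, cause)) // true: the cause is wrapped
}
```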

View File

@@ -1,9 +1,10 @@
 package blocksync
 import (
+    "time"
+
     "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/store"
-    "time"
     "github.com/ipfs/go-cid"
     logging "github.com/ipfs/go-log"

View File

@@ -2,6 +2,7 @@ package state
 import (
     "bytes"
+
     "github.com/filecoin-project/specs-actors/actors/util/adt"
     typegen "github.com/whyrusleeping/cbor-gen"
 )

View File

@@ -3,6 +3,7 @@ package state
 import (
     "bytes"
     "context"
+
     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/specs-actors/actors/abi"
     "github.com/filecoin-project/specs-actors/actors/abi/big"

View File

@@ -114,10 +114,10 @@ func TestMarketPredicates(t *testing.T) {
     }
     oldBalances := map[address.Address]balance{
-        tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)},
-        tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
-        tutils.NewIDAddr(t, 3): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)},
-        tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)},
+        tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)},
+        tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
+        tutils.NewIDAddr(t, 3): {abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)},
+        tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)},
     }
     oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances)
@@ -162,10 +162,10 @@ func TestMarketPredicates(t *testing.T) {
         // NB: DealProposals cannot be modified, so don't test that case.
     }
     newBalances := map[address.Address]balance{
-        tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(0)},
-        tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
-        tutils.NewIDAddr(t, 4): balance{abi.NewTokenAmount(5000), abi.NewTokenAmount(0)},
-        tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)},
+        tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(3000), abi.NewTokenAmount(0)},
+        tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
+        tutils.NewIDAddr(t, 4): {abi.NewTokenAmount(5000), abi.NewTokenAmount(0)},
+        tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)},
     }
     newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances)
@@ -505,6 +505,7 @@ func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, bala
     lockedMapRootCid, err := lockedMapRoot.Root()
     require.NoError(t, err)
     lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid)
+    require.NoError(t, err)
     for addr, balance := range balances {
         err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked))
@@ -542,6 +543,7 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o
     emptyVestingFunds := miner.ConstructVestingFunds()
     emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds)
+    require.NoError(t, err)
     emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
     emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines)

View File

@@ -41,10 +41,11 @@ import (
     "github.com/filecoin-project/lotus/node/repo"
 )
-var log = logging.Logger("gen")
-
 const msgsPerBlock = 20
+//nolint:deadcode,varcheck
+var log = logging.Logger("gen")
+
 var ValidWpostForTesting = []abi.PoStProof{{
     ProofBytes: []byte("valid proof"),
 }}
@@ -605,7 +606,7 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
     buf := new(bytes.Buffer)
     if err := miner.MarshalCBOR(buf); err != nil {
-        return nil, xerrors.Errorf("failed to cbor marshal address: %w")
+        return nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
     }
     electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())

View File

@@ -129,6 +129,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
             return nil
         })
+        if err != nil {
+            return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+        }
     }
     // Add market funds
@@ -217,9 +220,12 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
         }
         err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error {
-            st = reward.ConstructState(qaPow)
+            *st = *reward.ConstructState(qaPow)
             return nil
         })
+        if err != nil {
+            return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+        }
     }
     for i, m := range miners {
@@ -244,7 +250,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
             // we've added fake power for this sector above, remove it now
             err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
-                st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight)
+                st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint
                 st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
                 return nil
             })
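
Note: the substantive fix above is `st = reward.ConstructState(qaPow)` becoming `*st = *reward.ConstructState(qaPow)`. Inside the closure, `st` is a local copy of a pointer, so rebinding it changes nothing the caller can see; assigning through the pointer updates the value that gets persisted. A hedged, self-contained sketch of the difference (the types here are stand-ins, not the Lotus reward state):

```go
package main

import "fmt"

// State is a stand-in for an actor state object; not the real Lotus type.
type State struct{ Power int }

// mutate mimics the MutateState pattern: it hands the callback a pointer to
// the stored state and persists whatever that pointed-to value holds afterwards.
func mutate(stored *State, cb func(st *State)) { cb(stored) }

func construct(power int) *State { return &State{Power: power} }

func main() {
	stored := &State{}

	// Broken: rebinding the local pointer leaves the stored value untouched.
	mutate(stored, func(st *State) { st = construct(42); _ = st })
	fmt.Println(stored.Power) // 0

	// Fixed: copy the constructed value through the pointer.
	mutate(stored, func(st *State) { *st = *construct(42) })
	fmt.Println(stored.Power) // 42
}
```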

View File

@@ -21,10 +21,6 @@ func mustEnc(i cbg.CBORMarshaler) []byte {
     return enc
 }
-func doExec(ctx context.Context, vm *vm.VM, to, from address.Address, method abi.MethodNum, params []byte) ([]byte, error) {
-    return doExecValue(ctx, vm, to, from, types.NewInt(0), method, params)
-}
-
 func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
     act, err := vm.StateTree().GetActor(from)
     if err != nil {

View File

@@ -147,6 +147,7 @@ func TestAddFunds(t *testing.T) {
     }
     for testCase, data := range testCases {
+        //nolint:scopelint
         t.Run(testCase, func(t *testing.T) {
             ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
             defer cancel()
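
Note: scopelint flags uses of a `range` variable inside a function literal, because the closure can run after the variable has been reused for the next iteration (a real hazard once subtests call `t.Parallel()`). The `//nolint:scopelint` above accepts that risk; the conventional alternative is to shadow the loop variables, as in this hedged sketch:

```go
package example

import "testing"

func TestTableDriven(t *testing.T) {
	cases := map[string]int{"one": 1, "two": 2}

	for name, want := range cases {
		name, want := name, want // shadow the range variables for the closure
		t.Run(name, func(t *testing.T) {
			t.Parallel() // safe: each subtest closes over its own copies
			if want <= 0 {
				t.Fatalf("%s: want %d to be positive", name, want)
			}
		})
	}
}
```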

View File

@@ -56,7 +56,7 @@ func binomialCoefficient(n, k float64) float64 {
     for d := 1.0; d <= k; d++ {
         r *= n
         r /= d
-        n -= 1
+        n--
     }
     return r
 }

View File

@@ -43,7 +43,7 @@ func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) {
 }
 func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
-    return mpp.ps.Publish(k, v)
+    return mpp.ps.Publish(k, v) //nolint
 }
 func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {

View File

@@ -44,7 +44,7 @@ func SendHeadNotifs(nickname string) func(mctx helpers.MetricsCtx, lc fx.Lifecyc
             }
         }()
         go func() {
-            sub, err := ps.Subscribe(topic)
+            sub, err := ps.Subscribe(topic) //nolint
             if err != nil {
                 return
             }
@@ -116,6 +116,7 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain
             return err
         }
+        //nolint
         if err := ps.Publish(topic, b); err != nil {
             return err
         }

View File

@@ -246,7 +246,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
 }
 func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
-    ctx, span := trace.StartSpan(ctx, "stateTree.Flush")
+    ctx, span := trace.StartSpan(ctx, "stateTree.Flush") //nolint:staticcheck
     defer span.End()
     if len(st.snaps.layers) != 1 {
         return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
@@ -268,7 +268,7 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
 }
 func (st *StateTree) Snapshot(ctx context.Context) error {
-    ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot")
+    ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") //nolint:staticcheck
     defer span.End()
     st.snaps.addLayer()

View File

@@ -42,7 +42,7 @@ func TestIndexSeeks(t *testing.T) {
     if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
         t.Fatal(err)
     }
-    cs.SetGenesis(gen)
+    assert.NoError(t, cs.SetGenesis(gen))
     // Put 113 blocks from genesis
     for i := 0; i < 113; i++ {

View File

@@ -925,7 +925,7 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er
     for i, c := range cids {
         m, err := cs.GetMessage(c)
         if err != nil {
-            return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i)
+            return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
         }
         msgs = append(msgs, m)
@@ -939,7 +939,7 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe
     for i, c := range cids {
         m, err := cs.GetSignedMessage(c)
         if err != nil {
-            return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i)
+            return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
         }
         msgs = append(msgs, m)

View File

@@ -292,10 +292,9 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
             log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
             recordFailure("unknown_miner")
             return pubsub.ValidationReject
-        } else {
-            log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
-            return pubsub.ValidationIgnore
         }
+        log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
+        return pubsub.ValidationIgnore
     }
     err = sigs.CheckBlockSignature(ctx, blk.Header, key)
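
Note: several hunks in this commit apply golint's "indent-error-flow" advice: when the `if` branch ends in a `return`, drop the `else` and outdent the alternative path. A minimal before/after sketch with a hypothetical function:

```go
package example

// Before: golint suggests removing the else block.
func classifyBefore(synced bool) string {
	if synced {
		return "reject"
	} else { //nolint:golint // kept only to illustrate the flagged shape
		return "ignore"
	}
}

// After: the early return carries the flow and the alternative stays flat.
func classifyAfter(synced bool) string {
	if synced {
		return "reject"
	}
	return "ignore"
}
```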

View File

@@ -343,12 +343,12 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
             sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket)
         }
         return
-    } else {
-        // TODO: this is the case where we try to sync a chain, and
-        // fail, and we have more blocks on top of that chain that
-        // have come in since. The question is, should we try to
-        // sync these? or just drop them?
     }
+    // TODO: this is the case where we try to sync a chain, and
+    // fail, and we have more blocks on top of that chain that
+    // have come in since. The question is, should we try to
+    // sync these? or just drop them?
+    log.Error("failed to sync chain but have new unconnected blocks from chain")
 }
 if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {

View File

@@ -3,11 +3,12 @@ package chain_test
 import (
     "context"
     "fmt"
-    "github.com/ipfs/go-cid"
     "os"
     "testing"
     "time"
+    "github.com/ipfs/go-cid"
     ds "github.com/ipfs/go-datastore"
     logging "github.com/ipfs/go-log/v2"
     "github.com/libp2p/go-libp2p-core/peer"
@@ -36,7 +37,10 @@ import (
 func init() {
     build.InsecurePoStValidation = true
-    os.Setenv("TRUST_PARAMS", "1")
+    err := os.Setenv("TRUST_PARAMS", "1")
+    if err != nil {
+        panic(err)
+    }
     miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
         abi.RegisteredSealProof_StackedDrg2KiBV1: {},
     }
@@ -212,20 +216,6 @@ func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
     tu.g.CurTipset = mts
 }
-func fblkToBlkMsg(fb *types.FullBlock) *types.BlockMsg {
-    out := &types.BlockMsg{
-        Header: fb.Header,
-    }
-
-    for _, msg := range fb.BlsMessages {
-        out.BlsMessages = append(out.BlsMessages, msg.Cid())
-    }
-    for _, msg := range fb.SecpkMessages {
-        out.SecpkMessages = append(out.SecpkMessages, msg.Cid())
-    }
-    return out
-}
-
 func (tu *syncTestUtil) addSourceNode(gen int) {
     if tu.genesis != nil {
         tu.t.Fatal("source node already exists")
@@ -454,7 +444,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
 func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
     return []abi.PoStProof{
-        abi.PoStProof{
+        {
             PoStProof:  abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
             ProofBytes: []byte("evil"),
         },
@@ -587,7 +577,7 @@ func TestDuplicateNonce(t *testing.T) {
     msgs := make([][]*types.SignedMessage, 2)
     // Each miner includes a message from the banker with the same nonce, but to different addresses
-    for k, _ := range msgs {
+    for k := range msgs {
         msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
     }

View File

@@ -62,8 +62,8 @@ func (sm *SignedMessage) Serialize() ([]byte, error) {
     return buf.Bytes(), nil
 }
-func (m *SignedMessage) ChainLength() int {
-    ser, err := m.Serialize()
+func (sm *SignedMessage) ChainLength() int {
+    ser, err := sm.Serialize()
     if err != nil {
         panic(err)
     }

View File

@@ -238,3 +238,7 @@ func (ts *TipSet) IsChildOf(parent *TipSet) bool {
         // height for their processing logic at the moment to obviate it.
         ts.height > parent.height
 }
+
+func (ts *TipSet) String() string {
+    return fmt.Sprintf("%v", ts.cids)
+}

View File

@@ -2,6 +2,7 @@ package validation
 import (
     "context"
+
     "github.com/filecoin-project/lotus/chain/state"
     "github.com/filecoin-project/specs-actors/actors/runtime"
     cbor "github.com/ipfs/go-ipld-cbor"

View File

@@ -2,9 +2,10 @@ package validation
 import (
     "fmt"
-    "github.com/minio/blake2b-simd"
     "math/rand"
+    "github.com/minio/blake2b-simd"
+
     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/go-crypto"
     acrypto "github.com/filecoin-project/specs-actors/actors/crypto"

View File

@@ -18,7 +18,7 @@ func LoadVector(t *testing.T, f string, out interface{}) {
     if err != nil {
         t.Fatal(err)
     }
-    defer fi.Close()
+    defer fi.Close() //nolint:errcheck
     if err := json.NewDecoder(fi).Decode(out); err != nil {
         t.Fatal(err)

View File

@@ -410,8 +410,10 @@ type shimStateHandle struct {
 func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
     c := ssh.rt.Put(obj)
-    // TODO: handle error below
-    ssh.rt.stateCommit(EmptyObjectCid, c)
+    err := ssh.rt.stateCommit(EmptyObjectCid, c)
+    if err != nil {
+        panic(fmt.Errorf("failed to commit state after creating object: %w", err))
+    }
 }
 func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
@@ -440,8 +442,10 @@ func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func()) {
     c := ssh.rt.Put(obj)
-    // TODO: handle error below
-    ssh.rt.stateCommit(baseState, c)
+    err = ssh.rt.stateCommit(baseState, c)
+    if err != nil {
+        panic(fmt.Errorf("failed to commit state after transaction: %w", err))
+    }
 }
 func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) {

View File

@@ -254,6 +254,9 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
         if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil {
             return nil, aerrors.Wrap(aerr, "not enough gas for method invocation")
         }
+
+        // not charging any gas, just logging
+        //nolint:errcheck
         defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0))
         if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {

View File

@@ -2,6 +2,7 @@ package cli
 import (
     "fmt"
+
     "github.com/urfave/cli/v2"
     "golang.org/x/xerrors"

View File

@@ -319,7 +319,7 @@ var chainSetHeadCmd = &cli.Command{
             ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK)
         }
         if ts == nil {
-            ts, err = parseTipSet(api, ctx, cctx.Args().Slice())
+            ts, err = parseTipSet(ctx, api, cctx.Args().Slice())
         }
         if err != nil {
             return err
@@ -337,7 +337,7 @@ var chainSetHeadCmd = &cli.Command{
     },
 }
-func parseTipSet(api api.FullNode, ctx context.Context, vals []string) (*types.TipSet, error) {
+func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
     var headers []*types.BlockHeader
     for _, c := range vals {
         blkc, err := cid.Decode(c)

View File

@@ -930,11 +930,11 @@ var clientQueryAskCmd = &cli.Command{
             return xerrors.Errorf("failed to get peerID for miner: %w", err)
         }
-        if peer.ID(*mi.PeerId) == peer.ID("SETME") {
+        if *mi.PeerId == peer.ID("SETME") {
             return fmt.Errorf("the miner hasn't initialized yet")
         }
-        pid = peer.ID(*mi.PeerId)
+        pid = *mi.PeerId
     }
     ask, err := api.ClientQueryAsk(ctx, pid, maddr)
@@ -1045,55 +1045,55 @@ var clientListDeals = &cli.Command{
             fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
         }
         return w.Flush()
-    } else {
-        w := tablewriter.New(tablewriter.Col("DealCid"),
-            tablewriter.Col("DealId"),
-            tablewriter.Col("Provider"),
-            tablewriter.Col("State"),
-            tablewriter.Col("On Chain?"),
-            tablewriter.Col("Slashed?"),
-            tablewriter.Col("PieceCID"),
-            tablewriter.Col("Size"),
-            tablewriter.Col("Price"),
-            tablewriter.Col("Duration"),
-            tablewriter.NewLineCol("Message"))
-
-        for _, d := range deals {
-            propcid := d.LocalDeal.ProposalCid.String()
-            propcid = "..." + propcid[len(propcid)-8:]
-
-            onChain := "N"
-            if d.OnChainDealState.SectorStartEpoch != -1 {
-                onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
-            }
-
-            slashed := "N"
-            if d.OnChainDealState.SlashEpoch != -1 {
-                slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
-            }
-
-            piece := d.LocalDeal.PieceCID.String()
-            piece = "..." + piece[len(piece)-8:]
-
-            price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
-
-            w.Write(map[string]interface{}{
-                "DealCid":   propcid,
-                "DealId":    d.LocalDeal.DealID,
-                "Provider":  d.LocalDeal.Provider,
-                "State":     dealStateString(color, d.LocalDeal.State),
-                "On Chain?": onChain,
-                "Slashed?":  slashed,
-                "PieceCID":  piece,
-                "Size":      types.SizeStr(types.NewInt(d.LocalDeal.Size)),
-                "Price":     price,
-                "Duration":  d.LocalDeal.Duration,
-                "Message":   d.LocalDeal.Message,
-            })
-        }
-        return w.Flush(os.Stdout)
     }
+
+    w := tablewriter.New(tablewriter.Col("DealCid"),
+        tablewriter.Col("DealId"),
+        tablewriter.Col("Provider"),
+        tablewriter.Col("State"),
+        tablewriter.Col("On Chain?"),
+        tablewriter.Col("Slashed?"),
+        tablewriter.Col("PieceCID"),
+        tablewriter.Col("Size"),
+        tablewriter.Col("Price"),
+        tablewriter.Col("Duration"),
+        tablewriter.NewLineCol("Message"))
+
+    for _, d := range deals {
+        propcid := d.LocalDeal.ProposalCid.String()
+        propcid = "..." + propcid[len(propcid)-8:]
+
+        onChain := "N"
+        if d.OnChainDealState.SectorStartEpoch != -1 {
+            onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
+        }
+
+        slashed := "N"
+        if d.OnChainDealState.SlashEpoch != -1 {
+            slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
+        }
+
+        piece := d.LocalDeal.PieceCID.String()
+        piece = "..." + piece[len(piece)-8:]
+
+        price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
+
+        w.Write(map[string]interface{}{
+            "DealCid":   propcid,
+            "DealId":    d.LocalDeal.DealID,
+            "Provider":  d.LocalDeal.Provider,
+            "State":     dealStateString(color, d.LocalDeal.State),
+            "On Chain?": onChain,
+            "Slashed?":  slashed,
+            "PieceCID":  piece,
+            "Size":      types.SizeStr(types.NewInt(d.LocalDeal.Size)),
+            "Price":     price,
+            "Duration":  d.LocalDeal.Duration,
+            "Message":   d.LocalDeal.Message,
+        })
+    }
+
+    return w.Flush(os.Stdout)
 },
 }
@@ -1318,7 +1318,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
     for _, channel := range sendingChannels {
         w.Write(toChannelOutput(color, "Sending To", channel))
     }
-    w.Flush(out)
+    w.Flush(out) //nolint:errcheck
     fmt.Fprintf(out, "\nReceiving Channels\n\n")
     w = tablewriter.New(tablewriter.Col("ID"),
@@ -1332,7 +1332,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
     for _, channel := range receivingChannels {
         w.Write(toChannelOutput(color, "Receiving From", channel))
     }
-    w.Flush(out)
+    w.Flush(out) //nolint:errcheck
 }
 func channelStatusString(useColor bool, status datatransfer.Status) string {

View File

@@ -39,7 +39,7 @@ func RunApp(app *cli.App) {
     }
     var phe *PrintHelpErr
     if xerrors.As(err, &phe) {
-        cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
+        _ = cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
     }
     os.Exit(1)
 }

View File

@@ -346,7 +346,7 @@ var mpoolReplaceCmd = &cli.Command{
     if err != nil {
         return fmt.Errorf("parsing gas-premium: %w", err)
     }
-    // TODO: estiamte fee cap here
+    // TODO: estimate fee cap here
     msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
     if err != nil {
         return fmt.Errorf("parsing gas-feecap: %w", err)

View File

@@ -180,7 +180,7 @@ var netFindPeer = &cli.Command{
         return nil
     }
-    pid, err := peer.IDB58Decode(cctx.Args().First())
+    pid, err := peer.Decode(cctx.Args().First())
     if err != nil {
         return err
     }

View File

@@ -44,7 +44,7 @@ var PprofGoroutines = &cli.Command{
     addr = "http://" + addr + "/debug/pprof/goroutine?debug=2"
-    r, err := http.Get(addr)
+    r, err := http.Get(addr) //nolint:gosec
     if err != nil {
         return err
     }

View File

@@ -353,6 +353,9 @@ var stateReplaySetCmd = &cli.Command{
         }
         ts, err = types.NewTipSet(headers)
+        if err != nil {
+            return err
+        }
     } else {
         var r *api.MsgLookup
         r, err = fapi.StateWaitMsg(ctx, mcid, build.MessageConfidence)
@@ -365,9 +368,9 @@ var stateReplaySetCmd = &cli.Command{
             return xerrors.Errorf("loading tipset: %w", err)
         }
         ts, err = fapi.ChainGetTipSet(ctx, childTs.Parents())
-    }
-    if err != nil {
-        return err
+        if err != nil {
+            return err
+        }
     }
 }
@@ -1499,7 +1502,7 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
         }
         p.Elem().Field(i).Set(reflect.ValueOf(val))
     case reflect.TypeOf(peer.ID("")):
-        pid, err := peer.IDB58Decode(args[i])
+        pid, err := peer.Decode(args[i])
         if err != nil {
             return nil, fmt.Errorf("failed to parse peer ID: %s", err)
         }
@@ -1584,6 +1587,9 @@ var stateMarketBalanceCmd = &cli.Command{
     }
     balance, err := api.StateMarketBalance(ctx, addr, ts.Key())
+    if err != nil {
+        return err
+    }
     fmt.Printf("Escrow: %s\n", types.FIL(balance.Escrow))
     fmt.Printf("Locked: %s\n", types.FIL(balance.Locked))

View File

@@ -385,10 +385,9 @@ var walletVerify = &cli.Command{
     if api.WalletVerify(ctx, addr, msg, &sig) {
         fmt.Println("valid")
         return nil
-    } else {
-        fmt.Println("invalid")
-        return NewCliError("CLI Verify called with invalid signature")
     }
+    fmt.Println("invalid")
+    return NewCliError("CLI Verify called with invalid signature")
 },
 }
} }

View File

@@ -214,7 +214,7 @@ func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
     }
     for _, sub := range et.Subcalls {
-        c, v := countGasCosts(&sub)
+        c, v := countGasCosts(&sub) //nolint
         cgas += c
         vgas += v
     }
@@ -222,24 +222,6 @@ func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
     return cgas, vgas
 }
-func compStats(vals []float64) (float64, float64) {
-    var sum float64
-
-    for _, v := range vals {
-        sum += v
-    }
-
-    av := sum / float64(len(vals))
-
-    var varsum float64
-    for _, v := range vals {
-        delta := av - v
-        varsum += delta * delta
-    }
-
-    return av, math.Sqrt(varsum / float64(len(vals)))
-}
-
 type stats struct {
     timeTaken meanVar
     gasRatio  meanVar
@@ -264,20 +246,20 @@ func (cov1 *covar) VarianceX() float64 {
     return cov1.m2x / (cov1.n - 1)
 }
-func (v1 *covar) StddevX() float64 {
-    return math.Sqrt(v1.VarianceX())
+func (cov1 *covar) StddevX() float64 {
+    return math.Sqrt(cov1.VarianceX())
 }
 func (cov1 *covar) VarianceY() float64 {
     return cov1.m2y / (cov1.n - 1)
 }
-func (v1 *covar) StddevY() float64 {
-    return math.Sqrt(v1.VarianceY())
+func (cov1 *covar) StddevY() float64 {
+    return math.Sqrt(cov1.VarianceY())
 }
 func (cov1 *covar) AddPoint(x, y float64) {
-    cov1.n += 1
+    cov1.n++
     dx := x - cov1.meanX
     cov1.meanX += dx / cov1.n
@@ -344,7 +326,7 @@ type meanVar struct {
 func (v1 *meanVar) AddPoint(value float64) {
     // based on https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
-    v1.n += 1
+    v1.n++
     delta := value - v1.mean
     v1.mean += delta / v1.n
     delta2 := value - v1.mean
@@ -481,7 +463,7 @@ var importAnalyzeCmd = &cli.Command{
     }
     go func() {
-        http.ListenAndServe("localhost:6060", nil)
+        http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
     }()
     fi, err := os.Open(cctx.Args().First())
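
Note: the `meanVar.AddPoint` body kept by this diff is Welford's online algorithm: it updates a running mean and a sum of squared deviations one sample at a time, so variance never needs the second pass that the deleted `compStats` helper performed. A hedged standalone sketch of the same update rule (field names mirror the diff, but this is illustrative, not the Lotus code):

```go
package main

import (
	"fmt"
	"math"
)

// meanVar accumulates mean and variance online (Welford's algorithm).
type meanVar struct {
	n    float64
	mean float64
	m2   float64 // running sum of squared deviations from the current mean
}

func (v *meanVar) AddPoint(value float64) {
	v.n++
	delta := value - v.mean
	v.mean += delta / v.n
	delta2 := value - v.mean
	v.m2 += delta * delta2
}

func (v *meanVar) Variance() float64 { return v.m2 / (v.n - 1) }
func (v *meanVar) Stddev() float64   { return math.Sqrt(v.Variance()) }

func main() {
	var mv meanVar
	for _, x := range []float64{4, 7, 13, 16} {
		mv.AddPoint(x)
	}
	fmt.Printf("mean=%.2f stddev=%.2f\n", mv.mean, mv.Stddev()) // mean=10.00 stddev=5.48
}
```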

View File

@@ -159,7 +159,7 @@ func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardAct
         log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
     }()
-    grp, ctx := errgroup.WithContext(ctx)
+    grp, ctx := errgroup.WithContext(ctx) //nolint
     grp.Go(func() error {
         if err := p.storeChainPower(rewards); err != nil {

View File

@@ -29,7 +29,7 @@ var runCmd = &cli.Command{
     },
     Action: func(cctx *cli.Context) error {
         go func() {
-            http.ListenAndServe(":6060", nil)
+            http.ListenAndServe(":6060", nil) //nolint:errcheck
         }()
         ll := cctx.String("log-level")
         if err := logging.SetLogLevel("*", ll); err != nil {

View File

@@ -49,11 +49,11 @@ func setupTopMinerByBaseRewardSchema(ctx context.Context, db *sql.DB) error {
     order by 1 desc
     limit 1;
     `); err != nil {
-        return xerrors.Errorf("create top_miner_by_base_reward views", err)
+        return xerrors.Errorf("create top_miner_by_base_reward views: %w", err)
     }
     if err := tx.Commit(); err != nil {
-        return xerrors.Errorf("commiting top_miner_by_base_reward views", err)
+        return xerrors.Errorf("committing top_miner_by_base_reward views; %w", err)
     }
     return nil
 }

View File

@@ -25,7 +25,7 @@ func PrepareScheduler(db *sql.DB) *Scheduler {
 func (s *Scheduler) setupSchema(ctx context.Context) error {
     if err := setupTopMinerByBaseRewardSchema(ctx, s.db); err != nil {
-        return xerrors.Errorf("setup top miners by reward schema", err)
+        return xerrors.Errorf("setup top miners by reward schema: %w", err)
     }
     return nil
 }
@@ -35,14 +35,14 @@ func (s *Scheduler) Start(ctx context.Context) {
     log.Debug("Starting Scheduler")
     if err := s.setupSchema(ctx); err != nil {
-        log.Fatalw("applying scheduling schema", err)
+        log.Fatalw("applying scheduling schema", "error", err)
     }
     go func() {
         // run once on start after schema has initialized
         time.Sleep(5 * time.Second)
         if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
-            log.Errorf(err.Error())
+            log.Errorw("failed to refresh top miner", "error", err)
         }
         refreshTopMinerCh := time.NewTicker(30 * time.Second)
         defer refreshTopMinerCh.Stop()
@@ -50,7 +50,7 @@ func (s *Scheduler) Start(ctx context.Context) {
         select {
         case <-refreshTopMinerCh.C:
             if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
-                log.Errorf(err.Error())
+                log.Errorw("failed to refresh top miner", "error", err)
             }
         case <-ctx.Done():
             return

View File

@@ -47,7 +47,7 @@ var supportedSectors struct {
 }
 func init() {
-    for supportedSector, _ := range miner.SupportedProofTypes {
+    for supportedSector := range miner.SupportedProofTypes {
         sectorSize, err := supportedSector.SectorSize()
         if err != nil {
             panic(err)
@@ -207,24 +207,24 @@ type handler struct {
 func (h *handler) minerhtml(w http.ResponseWriter, r *http.Request) {
     f, err := rice.MustFindBox("site").Open("_miner.html")
     if err != nil {
-        w.WriteHeader(500)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusInternalServerError)
         return
     }
     tmpl, err := ioutil.ReadAll(f)
     if err != nil {
-        w.WriteHeader(500)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
     }
     var executedTmpl bytes.Buffer
     t, err := template.New("miner.html").Parse(string(tmpl))
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+    }
     if err := t.Execute(&executedTmpl, supportedSectors); err != nil {
-        w.WriteHeader(500)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusInternalServerError)
         return
     }
@@ -238,8 +238,7 @@ func (h *handler) minerhtml(w http.ResponseWriter, r *http.Request) {
 func (h *handler) send(w http.ResponseWriter, r *http.Request) {
     to, err := address.NewFromString(r.FormValue("address"))
     if err != nil {
-        w.WriteHeader(400)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
@@ -282,8 +281,7 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
         To:    to,
     }, nil)
     if err != nil {
-        w.WriteHeader(400)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
@@ -293,15 +291,15 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
 func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
     owner, err := address.NewFromString(r.FormValue("address"))
     if err != nil {
-        w.WriteHeader(400)
-        _, _ = w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
     if owner.Protocol() != address.BLS {
-        w.WriteHeader(400)
-        _, _ = w.Write([]byte("Miner address must use BLS. A BLS address starts with the prefix 't3'."))
-        _, _ = w.Write([]byte("Please create a BLS address by running \"lotus wallet new bls\" while connected to a Lotus node."))
+        http.Error(w,
+            "Miner address must use BLS. A BLS address starts with the prefix 't3'."+
+                "Please create a BLS address by running \"lotus wallet new bls\" while connected to a Lotus node.",
+            http.StatusBadRequest)
         return
     }
@@ -346,16 +344,14 @@ func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
         To:    owner,
     }, nil)
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte("pushfunds: " + err.Error()))
+        http.Error(w, "pushfunds: "+err.Error(), http.StatusBadRequest)
         return
     }
     log.Infof("%s: push funds %s", owner, smsg.Cid())
     spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize))
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte("sealprooftype: " + err.Error()))
+        http.Error(w, "sealprooftype: "+err.Error(), http.StatusBadRequest)
         return
     }
@@ -366,8 +362,7 @@ func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
         Peer:          abi.PeerID(h.defaultMinerPeer),
     })
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
@@ -382,66 +377,58 @@ func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
     signed, err := h.api.MpoolPushMessage(r.Context(), createStorageMinerMsg, nil)
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
     log.Infof("%s: create miner msg: %s", owner, signed.Cid())
-    http.Redirect(w, r, fmt.Sprintf("/wait.html?f=%s&m=%s&o=%s", signed.Cid(), smsg.Cid(), owner), 303)
+    http.Redirect(w, r, fmt.Sprintf("/wait.html?f=%s&m=%s&o=%s", signed.Cid(), smsg.Cid(), owner), http.StatusSeeOther)
 }
 func (h *handler) msgwait(w http.ResponseWriter, r *http.Request) {
     c, err := cid.Parse(r.FormValue("cid"))
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
     mw, err := h.api.StateWaitMsg(r.Context(), c, build.MessageConfidence)
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
     if mw.Receipt.ExitCode != 0 {
-        w.WriteHeader(400)
-        w.Write([]byte(xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode).Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
-    w.WriteHeader(200)
+    w.WriteHeader(http.StatusOK)
 }
 func (h *handler) msgwaitaddr(w http.ResponseWriter, r *http.Request) {
     c, err := cid.Parse(r.FormValue("cid"))
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
        return
     }
     mw, err := h.api.StateWaitMsg(r.Context(), c, build.MessageConfidence)
     if err != nil {
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }
     if mw.Receipt.ExitCode != 0 {
-        w.WriteHeader(400)
-        w.Write([]byte(xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode).Error()))
+        http.Error(w, xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode).Error(), http.StatusBadRequest)
         return
     }
-    w.WriteHeader(200)
+    w.WriteHeader(http.StatusOK)
     var ma power.CreateMinerReturn
     if err := ma.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil {
         log.Errorf("%w", err)
-        w.WriteHeader(400)
-        w.Write([]byte(err.Error()))
+        http.Error(w, err.Error(), http.StatusBadRequest)
         return
     }

View File

@@ -54,7 +54,12 @@ func main() {
     if err != nil {
         return err
     }
-    defer fi.Close()
+    defer func() {
+        err2 := fi.Close()
+        if err == nil {
+            err = err2
+        }
+    }()
     b, err := json.Marshal(ki)
     if err != nil {
View File

@@ -142,7 +142,7 @@ var runCmd = &cli.Command{
     },
     Action: func(cctx *cli.Context) error {
         go func() {
-            http.ListenAndServe(":6060", nil)
+            http.ListenAndServe(":6060", nil) //nolint:errcheck
         }()
         ctx := context.Background()
@@ -445,7 +445,7 @@ func (r *refunder) Refund(ctx context.Context, tipset *types.TipSet, refunds *Mi
     // Calculate the minimum balance as the total refund we need to issue plus 5% to cover fees
     minBalance := types.BigAdd(refundSum, types.BigDiv(refundSum, types.NewInt(500)))
     if balance.LessThan(minBalance) {
-        log.Errorw("not sufficent funds to cover refunds", "balance", balance, "refund_sum", refundSum, "minimum_required", minBalance)
+        log.Errorw("not sufficient funds to cover refunds", "balance", balance, "refund_sum", refundSum, "minimum_required", minBalance)
         return xerrors.Errorf("wallet does not have enough balance to cover refund")
     }
@@ -467,24 +467,24 @@ func (r *refunder) Refund(ctx context.Context, tipset *types.TipSet, refunds *Mi
     return nil
 }
-type repo struct {
+type Repo struct {
     last abi.ChainEpoch
     path string
 }
-func NewRepo(path string) (*repo, error) {
+func NewRepo(path string) (*Repo, error) {
     path, err := homedir.Expand(path)
     if err != nil {
         return nil, err
     }
-    return &repo{
+    return &Repo{
         last: 0,
         path: path,
     }, nil
 }
-func (r *repo) exists() (bool, error) {
+func (r *Repo) exists() (bool, error) {
     _, err := os.Stat(r.path)
     notexist := os.IsNotExist(err)
     if notexist {
@@ -494,7 +494,7 @@ func (r *repo) exists() (bool, error) {
 }
-func (r *repo) init() error {
+func (r *Repo) init() error {
     exist, err := r.exists()
     if err != nil {
         return err
@@ -511,7 +511,7 @@ func (r *repo) init() error {
     return nil
 }
-func (r *repo) Open() (err error) {
+func (r *Repo) Open() (err error) {
     if err = r.init(); err != nil {
         return
     }
@@ -542,11 +542,11 @@ func (r *repo) Open() (err error) {
     return
 }
-func (r *repo) Height() abi.ChainEpoch {
+func (r *Repo) Height() abi.ChainEpoch {
     return r.last
 }
-func (r *repo) SetHeight(last abi.ChainEpoch) (err error) {
+func (r *Repo) SetHeight(last abi.ChainEpoch) (err error) {
     r.last = last
     var f *os.File
     f, err = os.OpenFile(filepath.Join(r.path, "height"), os.O_RDWR, 0644)

View File

@@ -185,7 +185,7 @@ var runCmd = &cli.Command{
     return err
 }
 if v.APIVersion != build.APIVersion {
-    return xerrors.Errorf("lotus-miner API version doesn't match: local: ", api.Version{APIVersion: build.APIVersion})
+    return xerrors.Errorf("lotus-miner API version doesn't match: local: %s", api.Version{APIVersion: build.APIVersion})
 }
 log.Infof("Remote version %s", v)
@@ -420,10 +420,11 @@ func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageM
     log.Errorf("getting executable for auto-restart: %+v", err)
 }
-log.Sync()
+_ = log.Sync()
 // TODO: there are probably cleaner/more graceful ways to restart,
 // but this is good enough for now (FSM can recover from the mess this creates)
+//nolint:gosec
 if err := syscall.Exec(exe, []string{exe,
     fmt.Sprintf("--worker-repo=%s", cctx.String("worker-repo")),
     fmt.Sprintf("--miner-repo=%s", cctx.String("miner-repo")),
@@ -450,7 +451,7 @@ func extractRoutableIP(timeout time.Duration) (string, error) {
 env, ok := os.LookupEnv(minerMultiAddrKey)
 if !ok {
     // TODO remove after deprecation period
-    env, ok = os.LookupEnv(deprecatedMinerMultiAddrKey)
+    _, ok = os.LookupEnv(deprecatedMinerMultiAddrKey)
     if ok {
         log.Warnf("Using a deprecated env(%s) value, please use env(%s) instead.", deprecatedMinerMultiAddrKey, minerMultiAddrKey)
     }
@@ -461,7 +462,7 @@ func extractRoutableIP(timeout time.Duration) (string, error) {
 if err != nil {
     return "", err
 }
-defer conn.Close()
+defer conn.Close() //nolint:errcheck
 localAddr := conn.LocalAddr().(*net.TCPAddr)

View File

@@ -114,6 +114,9 @@ var preSealCmd = &cli.Command{
     return err
 }
 kb, err := hex.DecodeString(string(kh))
+if err != nil {
+    return err
+}
 if err := json.Unmarshal(kb, k); err != nil {
     return err
 }

View File

@ -69,7 +69,7 @@ var keyinfoImportCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
defer inputFile.Close() defer inputFile.Close() //nolint:errcheck
input = bufio.NewReader(inputFile) input = bufio.NewReader(inputFile)
} }
@ -98,7 +98,7 @@ var keyinfoImportCmd = &cli.Command{
return err return err
} }
defer lkrepo.Close() defer lkrepo.Close() //nolint:errcheck
keystore, err := lkrepo.KeyStore() keystore, err := lkrepo.KeyStore()
if err != nil { if err != nil {
@ -150,7 +150,7 @@ var keyinfoInfoCmd = &cli.Command{
The 'format' flag takes a golang text/template template as its value. The 'format' flag takes a golang text/template template as its value.
The following fields can be retrived through this command The following fields can be retrieved through this command
Type Type
Address Address
PublicKey PublicKey
@ -159,7 +159,7 @@ var keyinfoInfoCmd = &cli.Command{
Examples Examples
Retreive the address of a lotus wallet Retrieve the address of a lotus wallet
lotus-shed keyinfo info --format '{{ .Address }}' wallet.keyinfo lotus-shed keyinfo info --format '{{ .Address }}' wallet.keyinfo
`, `,
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -181,7 +181,7 @@ var keyinfoInfoCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
defer inputFile.Close() defer inputFile.Close() //nolint:errcheck
input = bufio.NewReader(inputFile) input = bufio.NewReader(inputFile)
} }
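The //nolint:errcheck annotations above mark Close errors that are intentionally ignored on read-only handles; where a Close error matters (after writes), the commit checks it instead. A hedged sketch of both cases, with illustrative file names, not code from this diff:

package main

import (
	"io/ioutil"
	"log"
	"os"
)

func readKeyInfo(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close() //nolint:errcheck // read-only; a Close failure is harmless

	return ioutil.ReadAll(f)
}

func writeKeyInfo(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close() //nolint:errcheck
		return err
	}
	// On writes the Close error matters: buffered data may only be flushed here.
	return f.Close()
}

func main() {
	if err := writeKeyInfo("wallet.keyinfo", []byte("example")); err != nil {
		log.Fatal(err)
	}
	if _, err := readKeyInfo("wallet.keyinfo"); err != nil {
		log.Fatal(err)
	}
}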

View File

@ -126,7 +126,9 @@ var infoAllCmd = &cli.Command{
fs := &flag.FlagSet{} fs := &flag.FlagSet{}
for _, f := range sectorsStatusCmd.Flags { for _, f := range sectorsStatusCmd.Flags {
f.Apply(fs) if err := f.Apply(fs); err != nil {
return err
}
} }
if err := fs.Parse([]string{"--log", "--on-chain-info", fmt.Sprint(s)}); err != nil { if err := fs.Parse([]string{"--log", "--on-chain-info", fmt.Sprint(s)}); err != nil {
return err return err

View File

@ -53,7 +53,10 @@ var runCmd = &cli.Command{
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if !cctx.Bool("enable-gpu-proving") { if !cctx.Bool("enable-gpu-proving") {
os.Setenv("BELLMAN_NO_GPU", "true") err := os.Setenv("BELLMAN_NO_GPU", "true")
if err != nil {
return err
}
} }
nodeApi, ncloser, err := lcli.GetFullNodeAPI(cctx) nodeApi, ncloser, err := lcli.GetFullNodeAPI(cctx)
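The Setenv change above is one of many errcheck fixes in this commit; a minimal, hypothetical sketch of the pattern (not part of the diff):

package main

import (
	"log"
	"os"
)

func main() {
	// errcheck requires the Setenv error to be handled or explicitly discarded.
	if err := os.Setenv("BELLMAN_NO_GPU", "true"); err != nil {
		log.Fatalf("setting env: %v", err)
	}
}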

View File

@ -3,13 +3,14 @@ package main
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"golang.org/x/xerrors"
"os" "os"
"sort" "sort"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
"time" "time"
"golang.org/x/xerrors"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"

View File

@ -2,13 +2,14 @@ package main
import ( import (
"fmt" "fmt"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"os" "os"
"sort" "sort"
"strconv" "strconv"
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"

View File

@ -106,10 +106,11 @@ func handler(ps *pubsub.PubSub) func(w http.ResponseWriter, r *http.Request) {
return return
} }
sub, err := ps.Subscribe(topic) sub, err := ps.Subscribe(topic) //nolint
if err != nil { if err != nil {
return return
} }
defer sub.Cancel() //nolint:errcheck
fmt.Println("new conn") fmt.Println("new conn")

View File

@ -118,7 +118,7 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil
trailerLen := binary.LittleEndian.Uint32(tlen[:]) trailerLen := binary.LittleEndian.Uint32(tlen[:])
expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize) expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize)
if expectLen != st.Size() { if expectLen != st.Size() {
return xerrors.Errorf("file '%d' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize) return xerrors.Errorf("file '%s' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize)
} }
if trailerLen > veryLargeRle { if trailerLen > veryLargeRle {
log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen) log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen)

View File

@ -12,6 +12,8 @@ func Statfs(path string) (FsStat, error) {
return FsStat{}, xerrors.Errorf("statfs: %w", err) return FsStat{}, xerrors.Errorf("statfs: %w", err)
} }
// force int64 to handle platform specific differences
//nolint:unconvert
return FsStat{ return FsStat{
Capacity: int64(stat.Blocks) * int64(stat.Bsize), Capacity: int64(stat.Blocks) * int64(stat.Bsize),
Available: int64(stat.Bavail) * int64(stat.Bsize), Available: int64(stat.Bavail) * int64(stat.Bsize),

View File

@ -438,7 +438,7 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVeri
} }
if !bytes.Equal(b1, b2) { if !bytes.Equal(b1, b2) {
return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != !s", string(b1), string(b2)) return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != %s", string(b1), string(b2))
} }
} }
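Both fixes above correct printf-style verbs that the printf analyzer flags: '%d' applied to a string path, and '!s' where '%s' was intended. A small illustrative sketch with made-up names:

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

func checkLength(path string, got, want int64) error {
	if got != want {
		// '%s' for the string path and '%d' for the sizes; mismatched verbs
		// here are exactly what the linter reports.
		return xerrors.Errorf("file '%s' has %d bytes; expected %d", path, got, want)
	}
	return nil
}

func main() {
	fmt.Println(checkLength("sector-data", 10, 12))
}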

View File

@ -36,6 +36,8 @@ func ExtractTar(body io.Reader, dir string) error {
return xerrors.Errorf("creating file %s: %w", filepath.Join(dir, header.Name), err) return xerrors.Errorf("creating file %s: %w", filepath.Join(dir, header.Name), err)
} }
// This data is coming from a trusted source, no need to check the size.
//nolint:gosec
if _, err := io.Copy(f, tr); err != nil { if _, err := io.Copy(f, tr); err != nil {
return err return err
} }
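The tar change above shows the suppression style used throughout this commit: keep the code, add a //nolint directive, and record why the finding (here gosec's unbounded-copy warning) does not apply. A rough sketch with hypothetical names:

package tarutil

import (
	"archive/tar"
	"io"
	"os"
)

// extractOne writes a single entry from a trusted tar stream to dst.
func extractOne(tr *tar.Reader, dst string) error {
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close() //nolint:errcheck

	// The stream comes from a trusted source, so the unbounded copy that
	// gosec warns about is acceptable here.
	//nolint:gosec
	if _, err := io.Copy(f, tr); err != nil {
		return err
	}
	return nil
}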

View File

@ -348,7 +348,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
timer := time.NewTimer(cfg.WaitDealsDelay) timer := time.NewTimer(cfg.WaitDealsDelay)
go func() { go func() {
<-timer.C <-timer.C
m.StartPacking(sector.SectorNumber) if err := m.StartPacking(sector.SectorNumber); err != nil {
log.Errorf("starting sector %d: %+v", sector.SectorNumber, err)
}
}() }()
} }
} }
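Errors returned inside a goroutine cannot be propagated to the caller, so the fix above logs them instead of silently dropping them. A minimal sketch of the same pattern, with stand-in names:

package main

import (
	"log"
	"time"
)

// startPacking is a stand-in for the real m.StartPacking call.
func startPacking(sector uint64) error { return nil }

func scheduleDeferredPacking(sector uint64, delay time.Duration) {
	timer := time.NewTimer(delay)
	go func() {
		<-timer.C
		if err := startPacking(sector); err != nil {
			log.Printf("starting sector %d: %+v", sector, err)
		}
	}()
}

func main() {
	scheduleDeferredPacking(1, 10*time.Millisecond)
	time.Sleep(20 * time.Millisecond)
}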

View File

@ -355,7 +355,9 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
timer := time.NewTimer(cf.WaitDealsDelay) timer := time.NewTimer(cf.WaitDealsDelay)
go func() { go func() {
<-timer.C <-timer.C
m.StartPacking(sid) if err := m.StartPacking(sid); err != nil {
log.Errorf("starting sector %d: %+v", sid, err)
}
}() }()
} }
@ -396,7 +398,6 @@ func (m *Sealing) Address() address.Address {
func getDealPerSectorLimit(size abi.SectorSize) uint64 { func getDealPerSectorLimit(size abi.SectorSize) uint64 {
if size < 64<<30 { if size < 64<<30 {
return 256 return 256
} else {
return 512
} }
return 512
} }
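The simplification above (and the similar ones in StateSearchMsg and WalletBalance later in this diff) follows the linter's advice to drop an else whose if branch already returns. Illustrative before/after:

package main

import "fmt"

// dealLimitBefore keeps the redundant else that the linter flags.
func dealLimitBefore(size uint64) uint64 {
	if size < 64<<30 {
		return 256
	} else {
		return 512
	}
}

// dealLimitAfter is the equivalent, lint-clean form.
func dealLimitAfter(size uint64) uint64 {
	if size < 64<<30 {
		return 256
	}
	return 512
}

func main() {
	fmt.Println(dealLimitBefore(1<<30), dealLimitAfter(1<<30))
}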

View File

@ -3,6 +3,7 @@ package sealing
import ( import (
"bytes" "bytes"
"context" "context"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"

1

go.mod
View File

@ -91,7 +91,6 @@ require (
github.com/libp2p/go-libp2p-kad-dht v0.8.3 github.com/libp2p/go-libp2p-kad-dht v0.8.3
github.com/libp2p/go-libp2p-mplex v0.2.4 github.com/libp2p/go-libp2p-mplex v0.2.4
github.com/libp2p/go-libp2p-noise v0.1.1 github.com/libp2p/go-libp2p-noise v0.1.1
github.com/libp2p/go-libp2p-peer v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.2.6 github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p-pubsub v0.3.5-0.20200821075113-efd56962bced github.com/libp2p/go-libp2p-pubsub v0.3.5-0.20200821075113-efd56962bced
github.com/libp2p/go-libp2p-quic-transport v0.7.1 github.com/libp2p/go-libp2p-quic-transport v0.7.1

View File

@ -5,7 +5,6 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"sync"
"time" "time"
logging "github.com/ipfs/go-log" logging "github.com/ipfs/go-log"
@ -48,8 +47,6 @@ type fsJournal struct {
fi *os.File fi *os.File
fSize int64 fSize int64
lk sync.Mutex
journalDir string journalDir string
incoming chan *JournalEntry incoming chan *JournalEntry
@ -58,7 +55,7 @@ type fsJournal struct {
closing chan struct{} closing chan struct{}
} }
func OpenFSJournal(dir string) (*fsJournal, error) { func OpenFSJournal(dir string) (Journal, error) {
fsj := &fsJournal{ fsj := &fsJournal{
journalDir: dir, journalDir: dir,
incoming: make(chan *JournalEntry, 32), incoming: make(chan *JournalEntry, 32),
@ -94,7 +91,7 @@ func (fsj *fsJournal) putEntry(je *JournalEntry) error {
fsj.fSize += int64(n) fsj.fSize += int64(n)
if fsj.fSize >= fsj.journalSizeLimit { if fsj.fSize >= fsj.journalSizeLimit {
fsj.rollJournalFile() return fsj.rollJournalFile()
} }
return nil return nil
@ -104,7 +101,10 @@ const RFC3339nocolon = "2006-01-02T150405Z0700"
func (fsj *fsJournal) rollJournalFile() error { func (fsj *fsJournal) rollJournalFile() error {
if fsj.fi != nil { if fsj.fi != nil {
fsj.fi.Close() err := fsj.fi.Close()
if err != nil {
return err
}
} }
nfi, err := os.Create(filepath.Join(fsj.journalDir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon)))) nfi, err := os.Create(filepath.Join(fsj.journalDir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon))))
@ -125,7 +125,9 @@ func (fsj *fsJournal) runLoop() {
log.Errorw("failed to write out journal entry", "entry", je, "err", err) log.Errorw("failed to write out journal entry", "entry", je, "err", err)
} }
case <-fsj.closing: case <-fsj.closing:
fsj.fi.Close() if err := fsj.fi.Close(); err != nil {
log.Errorw("failed to close journal", "err", err)
}
return return
} }
} }

View File

@ -28,12 +28,12 @@ func NewTemporary() blockstore.Blockstore {
return NewBlockstore(ds.NewMapDatastore()) return NewBlockstore(ds.NewMapDatastore())
} }
// NewTemporary returns a thread-safe temporary blockstore. // NewTemporarySync returns a thread-safe temporary blockstore.
func NewTemporarySync() blockstore.Blockstore { func NewTemporarySync() blockstore.Blockstore {
return NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) return NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
} }
// Wraps the underlying blockstore in an "identity" blockstore. // WrapIDStore wraps the underlying blockstore in an "identity" blockstore.
func WrapIDStore(bstore blockstore.Blockstore) blockstore.Blockstore { func WrapIDStore(bstore blockstore.Blockstore) blockstore.Blockstore {
return blockstore.NewIdStore(bstore) return blockstore.NewIdStore(bstore)
} }
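The two comment edits above satisfy golint's rule that a doc comment on an exported identifier must begin with that identifier's name. A tiny hypothetical example of the rule (the signature is simplified, not the real one):

package blockstore

// NewTemporarySync returns a thread-safe temporary store; golint would flag
// this doc comment if it started with any name other than NewTemporarySync.
func NewTemporarySync() map[string]struct{} {
	return make(map[string]struct{})
}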

View File

@ -11,6 +11,7 @@ import (
bstore "github.com/filecoin-project/lotus/lib/blockstore" bstore "github.com/filecoin-project/lotus/lib/blockstore"
) )
//nolint:deadcode,varcheck
var log = logging.Logger("cachebs") var log = logging.Logger("cachebs")
type CacheBS struct { type CacheBS struct {

View File

@ -64,7 +64,7 @@ func ReaderParamEncoder(addr string) jsonrpc.Option {
return return
} }
defer resp.Body.Close() defer resp.Body.Close() //nolint:errcheck
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
b, _ := ioutil.ReadAll(resp.Body) b, _ := ioutil.ReadAll(resp.Body)

View File

@ -56,7 +56,7 @@ func TestReaderProxy(t *testing.T) {
read, err := client.ReadAll(context.TODO(), strings.NewReader("pooooootato")) read, err := client.ReadAll(context.TODO(), strings.NewReader("pooooootato"))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "pooooootato", string(read), "potatos weren't equal") require.Equal(t, "pooooootato", string(read), "potatoes weren't equal")
} }
func TestNullReaderProxy(t *testing.T) { func TestNullReaderProxy(t *testing.T) {

View File

@ -2,8 +2,9 @@ package bls
import ( import (
"crypto/rand" "crypto/rand"
"github.com/filecoin-project/go-address"
"testing" "testing"
"github.com/filecoin-project/go-address"
) )
func BenchmarkBLSSign(b *testing.B) { func BenchmarkBLSSign(b *testing.B) {
@ -12,7 +13,7 @@ func BenchmarkBLSSign(b *testing.B) {
b.StopTimer() b.StopTimer()
pk, _ := signer.GenPrivate() pk, _ := signer.GenPrivate()
randMsg := make([]byte, 32) randMsg := make([]byte, 32)
rand.Read(randMsg) _, _ = rand.Read(randMsg)
b.StartTimer() b.StartTimer()
_, _ = signer.Sign(pk, randMsg) _, _ = signer.Sign(pk, randMsg)
@ -24,7 +25,7 @@ func BenchmarkBLSVerify(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
randMsg := make([]byte, 32) randMsg := make([]byte, 32)
rand.Read(randMsg) _, _ = rand.Read(randMsg)
priv, _ := signer.GenPrivate() priv, _ := signer.GenPrivate()
pk, _ := signer.ToPublic(priv) pk, _ := signer.ToPublic(priv)

View File

@ -17,7 +17,7 @@ import (
type NodeState int type NodeState int
const ( const (
NodeUnknown = iota NodeUnknown = iota //nolint:deadcode
NodeRunning NodeRunning
NodeStopped NodeStopped
) )

View File

@ -73,7 +73,7 @@ func (m *outmux) run() {
case msg := <-stdout: case msg := <-stdout:
for k, out := range m.outs { for k, out := range m.outs {
if err := out.WriteMessage(websocket.BinaryMessage, msg); err != nil { if err := out.WriteMessage(websocket.BinaryMessage, msg); err != nil {
out.Close() _ = out.Close()
fmt.Printf("outmux write failed: %s\n", err) fmt.Printf("outmux write failed: %s\n", err)
delete(m.outs, k) delete(m.outs, k)
} }

View File

@ -266,7 +266,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider
return false, nil return false, nil
} }
sd, err := stmgr.GetStorageDeal(ctx, c.StateManager, abi.DealID(dealId), ts) sd, err := stmgr.GetStorageDeal(ctx, c.StateManager, dealId, ts)
if err != nil { if err != nil {
return false, xerrors.Errorf("failed to look up deal on chain: %w", err) return false, xerrors.Errorf("failed to look up deal on chain: %w", err)
} }
@ -303,7 +303,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider
} }
for _, did := range params.DealIDs { for _, did := range params.DealIDs {
if did == abi.DealID(dealId) { if did == dealId {
sectorNumber = params.SectorNumber sectorNumber = params.SectorNumber
sectorFound = true sectorFound = true
return true, false, nil return true, false, nil
@ -464,7 +464,7 @@ func (c *ClientNodeAdapter) ValidateAskSignature(ctx context.Context, ask *stora
mi, err := c.StateMinerInfo(ctx, ask.Ask.Miner, tsk) mi, err := c.StateMinerInfo(ctx, ask.Ask.Miner, tsk)
if err != nil { if err != nil {
return false, xerrors.Errorf("failed to get worker for miner in ask", err) return false, xerrors.Errorf("failed to get worker for miner in ask: %w", err)
} }
sigb, err := cborutil.Dump(ask.Ask) sigb, err := cborutil.Dump(ask.Ask)

View File

@ -69,7 +69,7 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark
}) })
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: ", err) return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err)
} }
// TODO: We may want this to happen after fetching data // TODO: We may want this to happen after fetching data
@ -267,7 +267,7 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide
return false, nil return false, nil
} }
sd, err := n.StateMarketStorageDeal(ctx, abi.DealID(dealID), ts.Key()) sd, err := n.StateMarketStorageDeal(ctx, dealID, ts.Key())
if err != nil { if err != nil {
return false, xerrors.Errorf("failed to look up deal on chain: %w", err) return false, xerrors.Errorf("failed to look up deal on chain: %w", err)
} }
@ -305,7 +305,7 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide
} }
for _, did := range params.DealIDs { for _, did := range params.DealIDs {
if did == abi.DealID(dealID) { if did == dealID {
sectorNumber = params.SectorNumber sectorNumber = params.SectorNumber
sectorFound = true sectorFound = true
return true, false, nil return true, false, nil

View File

@ -36,7 +36,7 @@ type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, error), abi
func randTimeOffset(width time.Duration) time.Duration { func randTimeOffset(width time.Duration) time.Duration {
buf := make([]byte, 8) buf := make([]byte, 8)
rand.Reader.Read(buf) rand.Reader.Read(buf) //nolint:errcheck
val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width)) val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width))
return val - (width / 2) return val - (width / 2)
@ -508,12 +508,3 @@ func (c *cachedActorLookup) StateGetActor(ctx context.Context, a address.Address
} }
type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
func countFrom(msgs []*types.SignedMessage, from address.Address) (out int) {
for _, msg := range msgs {
if msg.Message.From == from {
out++
}
}
return out
}

View File

@ -67,6 +67,7 @@ import (
"github.com/filecoin-project/lotus/storage/sectorblocks" "github.com/filecoin-project/lotus/storage/sectorblocks"
) )
//nolint:deadcode,varcheck
var log = logging.Logger("builder") var log = logging.Logger("builder")
// special is a type used to give keys to modules which // special is a type used to give keys to modules which

View File

@ -109,7 +109,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
continue continue
} }
if c.Equals(params.Data.Root) { if c.Equals(params.Data.Root) {
storeID = &importID storeID = &importID //nolint
break break
} }
} }
@ -614,7 +614,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer rdr.Close() defer rdr.Close() //nolint:errcheck
stat, err := rdr.Stat() stat, err := rdr.Stat()
if err != nil { if err != nil {
@ -700,7 +700,7 @@ func (a *API) clientImport(ctx context.Context, ref api.FileRef, store *multisto
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
} }
defer f.Close() defer f.Close() //nolint:errcheck
stat, err := f.Stat() stat, err := f.Stat()
if err != nil { if err != nil {

View File

@ -2,14 +2,15 @@ package full
import ( import (
"context" "context"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"go.uber.org/fx" "go.uber.org/fx"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"

View File

@ -28,7 +28,7 @@ type MsigAPI struct {
MpoolAPI MpoolAPI MpoolAPI MpoolAPI
} }
// TODO: remove gp (gasPrice) from arguemnts // TODO: remove gp (gasPrice) from arguments
func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
lenAddrs := uint64(len(addrs)) lenAddrs := uint64(len(addrs))

View File

@ -443,9 +443,8 @@ func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLoo
TipSet: ts.Key(), TipSet: ts.Key(),
Height: ts.Height(), Height: ts.Height(),
}, nil }, nil
} else {
return nil, nil
} }
return nil, nil
} }
func (a *StateAPI) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { func (a *StateAPI) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) {
@ -557,7 +556,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m
} else if !found { } else if !found {
s.SectorStartEpoch = -1 s.SectorStartEpoch = -1
} }
out[strconv.FormatInt(int64(i), 10)] = api.MarketDeal{ out[strconv.FormatInt(i, 10)] = api.MarketDeal{
Proposal: d, Proposal: d,
State: s, State: s,
} }
@ -785,10 +784,6 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre
return nil, err return nil, err
} }
if found == nil {
}
return found, nil return found, nil
} }
@ -1147,12 +1142,12 @@ func (a *StateAPI) StateDealProviderCollateralBounds(ctx context.Context, size a
}) })
if err != nil { if err != nil {
return api.DealCollateralBounds{}, xerrors.Errorf("getting power and reward actor states: %w") return api.DealCollateralBounds{}, xerrors.Errorf("getting power and reward actor states: %w", err)
} }
circ, err := a.StateCirculatingSupply(ctx, ts.Key()) circ, err := a.StateCirculatingSupply(ctx, ts.Key())
if err != nil { if err != nil {
return api.DealCollateralBounds{}, xerrors.Errorf("getting total circulating supply: %w") return api.DealCollateralBounds{}, xerrors.Errorf("getting total circulating supply: %w", err)
} }
min, max := market.DealProviderCollateralBounds(size, verified, powerState.ThisEpochQualityAdjPower, rewardState.ThisEpochBaselinePower, circ.FilCirculating) min, max := market.DealProviderCollateralBounds(size, verified, powerState.ThisEpochQualityAdjPower, rewardState.ThisEpochBaselinePower, circ.FilCirculating)
@ -1175,6 +1170,9 @@ func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetK
cst := cbor.NewCborStore(a.Chain.Blockstore()) cst := cbor.NewCborStore(a.Chain.Blockstore())
sTree, err := state.LoadStateTree(cst, st) sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return api.CirculatingSupply{}, err
}
return a.StateManager.GetCirculatingSupplyDetailed(ctx, ts.Height(), sTree) return a.StateManager.GetCirculatingSupplyDetailed(ctx, ts.Height(), sTree)
} }

View File

@ -90,8 +90,7 @@ func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) erro
return xerrors.Errorf("serializing block for pubsub publishing failed: %w", err) return xerrors.Errorf("serializing block for pubsub publishing failed: %w", err)
} }
// TODO: anything else to do here? return a.PubSub.Publish(build.BlocksTopic(a.NetName), b) //nolint:staticcheck
return a.PubSub.Publish(build.BlocksTopic(a.NetName), b)
} }
func (a *SyncAPI) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { func (a *SyncAPI) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {

View File

@ -2,19 +2,18 @@ package full
import ( import (
"context" "context"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/lib/sigs"
"go.uber.org/fx"
"golang.org/x/xerrors"
) )
type WalletAPI struct { type WalletAPI struct {
@ -45,9 +44,8 @@ func (a *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (ty
if xerrors.Is(err, types.ErrActorNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
return big.Zero(), nil return big.Zero(), nil
} else {
return bal, err
} }
return bal, err
} }
func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {

View File

@ -123,8 +123,7 @@ func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, md
return c.Start(ctx) return c.Start(ctx)
}, },
OnStop: func(context.Context) error { OnStop: func(context.Context) error {
c.Stop() return c.Stop()
return nil
}, },
}) })
return c, nil return c, nil

View File

@ -36,8 +36,8 @@ func RecordValidator(ps peerstore.Peerstore) record.Validator {
} }
} }
const JWTSecretName = "auth-jwt-private" //nolint:gosec const JWTSecretName = "auth-jwt-private" //nolint:gosec
const KTJwtHmacSecret = "jwt-hmac-secret" const KTJwtHmacSecret = "jwt-hmac-secret" //nolint:gosec
type JwtPayload struct { type JwtPayload struct {
Allow []auth.Permission Allow []auth.Permission

View File

@ -8,7 +8,7 @@ import (
gsnet "github.com/ipfs/go-graphsync/network" gsnet "github.com/ipfs/go-graphsync/network"
"github.com/ipfs/go-graphsync/storeutil" "github.com/ipfs/go-graphsync/storeutil"
"github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/host"
peer "github.com/libp2p/go-libp2p-peer" "github.com/libp2p/go-libp2p-core/peer"
"go.uber.org/fx" "go.uber.org/fx"
) )

View File

@ -76,7 +76,7 @@ func RunBlockSync(h host.Host, svc *blocksync.BlockSyncService) {
func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) { func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) {
ctx := helpers.LifecycleCtx(mctx, lc) ctx := helpers.LifecycleCtx(mctx, lc)
blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) //nolint
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -98,7 +98,7 @@ func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.P
func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName) { func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName) {
ctx := helpers.LifecycleCtx(mctx, lc) ctx := helpers.LifecycleCtx(mctx, lc)
msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) //nolint:staticcheck
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -3,7 +3,6 @@ package storage
import ( import (
"bytes" "bytes"
"context" "context"
"github.com/filecoin-project/lotus/api"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -18,6 +17,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"

View File

@ -2,6 +2,7 @@ package storage
import ( import (
"context" "context"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"

View File

@ -4,13 +4,13 @@ import (
"bytes" "bytes"
"context" "context"
"errors" "errors"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"time" "time"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"

View File

@ -11,7 +11,7 @@ type headBuffer struct {
size int size int
} }
func NewHeadBuffer(size int) *headBuffer { func newHeadBuffer(size int) *headBuffer {
buffer := list.New() buffer := list.New()
buffer.Init() buffer.Init()
@ -21,7 +21,7 @@ func NewHeadBuffer(size int) *headBuffer {
} }
} }
func (h *headBuffer) Push(hc *api.HeadChange) (rethc *api.HeadChange) { func (h *headBuffer) push(hc *api.HeadChange) (rethc *api.HeadChange) {
if h.buffer.Len() == h.size { if h.buffer.Len() == h.size {
var ok bool var ok bool
@ -39,7 +39,7 @@ func (h *headBuffer) Push(hc *api.HeadChange) (rethc *api.HeadChange) {
return return
} }
func (h *headBuffer) Pop() { func (h *headBuffer) pop() {
el := h.buffer.Back() el := h.buffer.Back()
if el != nil { if el != nil {
h.buffer.Remove(el) h.buffer.Remove(el)
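The renames above address golint's complaint that the exported constructor NewHeadBuffer returned the unexported type *headBuffer; the commit unexports the constructor and its methods rather than exporting the type. A minimal sketch of the lint-clean shape (package name assumed):

package chainwatch

import "container/list"

type headBuffer struct {
	buffer *list.List
	size   int
}

// newHeadBuffer stays unexported to match the unexported headBuffer type.
func newHeadBuffer(size int) *headBuffer {
	b := list.New()
	b.Init()
	return &headBuffer{buffer: b, size: size}
}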

View File

@ -10,34 +10,34 @@ import (
func TestHeadBuffer(t *testing.T) { func TestHeadBuffer(t *testing.T) {
t.Run("Straight push through", func(t *testing.T) { t.Run("Straight push through", func(t *testing.T) {
hb := NewHeadBuffer(5) hb := newHeadBuffer(5)
require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) require.Nil(t, hb.push(&api.HeadChange{Type: "1"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) require.Nil(t, hb.push(&api.HeadChange{Type: "2"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) require.Nil(t, hb.push(&api.HeadChange{Type: "3"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) require.Nil(t, hb.push(&api.HeadChange{Type: "4"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) require.Nil(t, hb.push(&api.HeadChange{Type: "5"}))
hc := hb.Push(&api.HeadChange{Type: "6"}) hc := hb.push(&api.HeadChange{Type: "6"})
require.Equal(t, hc.Type, "1") require.Equal(t, hc.Type, "1")
}) })
t.Run("Reverts", func(t *testing.T) { t.Run("Reverts", func(t *testing.T) {
hb := NewHeadBuffer(5) hb := newHeadBuffer(5)
require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) require.Nil(t, hb.push(&api.HeadChange{Type: "1"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) require.Nil(t, hb.push(&api.HeadChange{Type: "2"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) require.Nil(t, hb.push(&api.HeadChange{Type: "3"}))
hb.Pop() hb.pop()
require.Nil(t, hb.Push(&api.HeadChange{Type: "3a"})) require.Nil(t, hb.push(&api.HeadChange{Type: "3a"}))
hb.Pop() hb.pop()
require.Nil(t, hb.Push(&api.HeadChange{Type: "3b"})) require.Nil(t, hb.push(&api.HeadChange{Type: "3b"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) require.Nil(t, hb.push(&api.HeadChange{Type: "4"}))
require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) require.Nil(t, hb.push(&api.HeadChange{Type: "5"}))
hc := hb.Push(&api.HeadChange{Type: "6"}) hc := hb.push(&api.HeadChange{Type: "6"})
require.Equal(t, hc.Type, "1") require.Equal(t, hc.Type, "1")
hc = hb.Push(&api.HeadChange{Type: "7"}) hc = hb.push(&api.HeadChange{Type: "7"})
require.Equal(t, hc.Type, "2") require.Equal(t, hc.Type, "2")
hc = hb.Push(&api.HeadChange{Type: "8"}) hc = hb.push(&api.HeadChange{Type: "8"})
require.Equal(t, hc.Type, "3b") require.Equal(t, hc.Type, "3b")
}) })
} }

View File

@ -251,16 +251,13 @@ func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointLis
return err return err
} }
err = mp.ForEach(nil, func(key string) error { var claim power.Claim
err = mp.ForEach(&claim, func(key string) error {
addr, err := address.NewFromBytes([]byte(key)) addr, err := address.NewFromBytes([]byte(key))
if err != nil { if err != nil {
return err return err
} }
var claim power.Claim
keyerAddr := adt.AddrKey(addr)
mp.Get(keyerAddr, &claim)
if claim.QualityAdjPower.Int64() == 0 { if claim.QualityAdjPower.Int64() == 0 {
return nil return nil
} }
@ -311,7 +308,7 @@ func RecordTipsetMessagesPoints(ctx context.Context, api api.FullNode, pl *Point
for i, msg := range msgs { for i, msg := range msgs {
// FIXME: use float so this doesn't overflow // FIXME: use float so this doesn't overflow
// FIXME: this doesn't work as time points get overriden // FIXME: this doesn't work as time points get overridden
p := NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) p := NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64())
pl.AddPoint(p) pl.AddPoint(p)
p = NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) p = NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64())

View File

@ -124,7 +124,7 @@ sync_complete:
func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) {
chmain := make(chan *types.TipSet) chmain := make(chan *types.TipSet)
hb := NewHeadBuffer(headlag) hb := newHeadBuffer(headlag)
notif, err := api.ChainNotify(ctx) notif, err := api.ChainNotify(ctx)
if err != nil { if err != nil {
@ -134,7 +134,8 @@ func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, h
go func() { go func() {
defer close(chmain) defer close(chmain)
ping := time.Tick(30 * time.Second) ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for { for {
select { select {
@ -154,14 +155,14 @@ func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, h
chmain <- tipset chmain <- tipset
} }
case store.HCApply: case store.HCApply:
if out := hb.Push(change); out != nil { if out := hb.push(change); out != nil {
chmain <- out.Val chmain <- out.Val
} }
case store.HCRevert: case store.HCRevert:
hb.Pop() hb.pop()
} }
} }
case <-ping: case <-ticker.C:
log.Info("Running health check") log.Info("Running health check")
cctx, cancel := context.WithTimeout(ctx, 5*time.Second) cctx, cancel := context.WithTimeout(ctx, 5*time.Second)
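The last hunk replaces time.Tick with time.NewTicker: time.Tick offers no way to stop the underlying ticker, which staticcheck flags in long-lived loops. A small runnable sketch of the replacement pattern (not code from this diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(30 * time.Millisecond)
	defer ticker.Stop() // without Stop, the ticker would never be released

	deadline := time.After(100 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
			fmt.Println("running health check")
		case <-deadline:
			return
		}
	}
}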