chore: fix lint errors in simulation
parent 2aedd82c72
commit eb2b706156
@@ -672,7 +672,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
 }
 
 // Get the buffered blockstore associated with the VM. This includes any temporary blocks produced
-// during thsi VM's execution.
+// during this VM's execution.
 func (vm *VM) ActorStore(ctx context.Context) adt.Store {
 	return adt.WrapStore(ctx, vm.cst)
 }
@@ -9,12 +9,16 @@ import (
 var copySimCommand = &cli.Command{
 	Name:      "copy",
 	ArgsUsage: "<new-name>",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 		if cctx.NArg() != 1 {
 			return fmt.Errorf("expected 1 argument")
 		}
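Note: the two changes above repeat in every subcommand below. `Action: func(cctx *cli.Context) error` becomes `(err error)` so a deferred closure can report the error from `node.Close()`, which a plain `defer node.Close()` silently drops (the usual errcheck lint). A minimal self-contained sketch of the pattern; the `closer` type is hypothetical, for illustration only:

package main

import (
	"errors"
	"fmt"
)

// closer is a hypothetical stand-in for the simulation node handle.
type closer struct{}

func (closer) Close() error { return errors.New("close failed") }

// run mirrors the commit's pattern: the named return lets the deferred
// closure promote Close's error when the body itself succeeded.
func run() (err error) {
	node := closer{}
	defer func() {
		if cerr := node.Close(); err == nil {
			err = cerr
		}
	}()
	return nil // success path: a Close failure is still reported
}

func main() {
	fmt.Println(run()) // prints "close failed"
}

On a failure path the original error wins, because the deferred closure only overwrites a nil `err`.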
@@ -12,12 +12,16 @@ import (
 var createSimCommand = &cli.Command{
 	Name:      "create",
 	ArgsUsage: "[tipset]",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		var ts *types.TipSet
 		switch cctx.NArg() {
@@ -6,12 +6,16 @@ import (
 
 var deleteSimCommand = &cli.Command{
 	Name: "delete",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		return node.DeleteSim(cctx.Context, cctx.String("simulation"))
 	},
@@ -89,12 +89,16 @@ var infoSimCommand = &cli.Command{
 		infoCapacityGrowthSimCommand,
 		infoStateGrowthSimCommand,
 	},
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {
@@ -14,12 +14,16 @@ import (
 var infoCapacityGrowthSimCommand = &cli.Command{
 	Name:        "capacity-growth",
 	Description: "List daily capacity growth over the course of the simulation starting at the end.",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {
@@ -4,13 +4,13 @@ import (
 	"bytes"
 	"fmt"
 	"os"
-
-	"github.com/ipfs/go-cid"
-	"github.com/streadway/quantile"
-	"github.com/urfave/cli/v2"
 	"syscall"
 
+	"github.com/streadway/quantile"
+	"github.com/urfave/cli/v2"
+
 	"github.com/filecoin-project/go-state-types/exitcode"
+	"github.com/ipfs/go-cid"
 
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/stmgr"
@@ -28,7 +28,7 @@ var infoCommitGasSimCommand = &cli.Command{
 			Value: 0,
 		},
 	},
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		log := func(f string, i ...interface{}) {
 			fmt.Fprintf(os.Stderr, f, i...)
 		}
@@ -36,7 +36,11 @@ var infoCommitGasSimCommand = &cli.Command{
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		go profileOnSignal(cctx, syscall.SIGUSR2)
 
@@ -21,12 +21,16 @@ import (
 var infoStateGrowthSimCommand = &cli.Command{
 	Name:        "state-size",
 	Description: "List daily state size over the course of the simulation starting at the end.",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {
@@ -58,14 +62,13 @@ var infoStateGrowthSimCommand = &cli.Command{
 		var totalSize uint64
 		if err := store.View(c, func(data []byte) error {
 			totalSize += uint64(len(data))
-			cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+			return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
 				if c.Prefix().Codec != cid.DagCBOR {
 					return
 				}
 
 				links = append(links, c)
 			})
-			return nil
 		}); err != nil {
 			return 0, err
 		}
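Note: here the lint fix is also a bug fix. `cbg.ScanForLinks` returns an error that the old code called and dropped before unconditionally returning nil; returning the scanner's error from the `store.View` callback propagates scan failures to the caller. A sketch of the shape of the fix, with hypothetical stand-ins for `cbg.ScanForLinks` and `store.View`:

package main

import (
	"errors"
	"fmt"
)

// scanForLinks is a hypothetical stand-in for cbg.ScanForLinks: it calls a
// callback per item and returns an error the caller must not drop.
func scanForLinks(data []byte, cb func(byte)) error {
	if len(data) == 0 {
		return errors.New("truncated block")
	}
	for _, b := range data {
		cb(b)
	}
	return nil
}

// view is a hypothetical stand-in for store.View: it runs the callback and
// propagates the callback's error to its own caller.
func view(data []byte, cb func([]byte) error) error { return cb(data) }

func main() {
	var links []byte
	err := view(nil, func(data []byte) error {
		// The fix: return the scanner's error instead of calling it,
		// ignoring the result, and then returning nil.
		return scanForLinks(data, func(b byte) { links = append(links, b) })
	})
	fmt.Println(err) // the scan failure now reaches the caller
}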
@@ -18,12 +18,16 @@ import (
 var infoWindowPostBandwidthSimCommand = &cli.Command{
 	Name:        "post-bandwidth",
 	Description: "List average chain bandwidth used by window posts for each day of the simulation.",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {
@@ -9,12 +9,16 @@ import (
 
 var listSimCommand = &cli.Command{
 	Name: "list",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		list, err := node.ListSims(cctx.Context)
 		if err != nil {
@@ -9,12 +9,17 @@ import (
 var renameSimCommand = &cli.Command{
 	Name:      "rename",
 	ArgsUsage: "<new-name>",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
+
 		if cctx.NArg() != 1 {
 			return fmt.Errorf("expected 1 argument")
 		}
@@ -22,12 +22,16 @@ Signals:
 			Usage: "Advance the given number of epochs then stop.",
 		},
 	},
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		go profileOnSignal(cctx, syscall.SIGUSR2)
 
@@ -150,7 +150,10 @@ func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt,
 	msg.GasLimit = build.BlockGasTarget
 
 	// We manually snapshot so we can revert nonce changes, etc. on failure.
-	st.Snapshot(bb.ctx)
+	err = st.Snapshot(bb.ctx)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to take a snapshot while estimating message gas: %w", err)
+	}
 	defer st.ClearSnapshot()
 
 	ret, err := bb.vm.ApplyMessage(bb.ctx, msg)
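Note: `st.Snapshot` returns an error that the old code ignored. The commit checks it and wraps it with `%w` (via `xerrors.Errorf`) so callers can still match the underlying cause. A sketch using the stdlib's equivalent `fmt.Errorf` wrapping and a hypothetical `takeSnapshot` stub:

package main

import (
	"errors"
	"fmt"
)

var errSnapshot = errors.New("state tree snapshot failed")

// takeSnapshot is a hypothetical stub for st.Snapshot.
func takeSnapshot() error { return errSnapshot }

func pushMessage() error {
	// The fix: check Snapshot's error instead of dropping it; wrapping with
	// %w keeps the cause matchable via errors.Is / errors.As.
	if err := takeSnapshot(); err != nil {
		return fmt.Errorf("failed to take a snapshot while estimating message gas: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(errors.Is(pushMessage(), errSnapshot)) // true
}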
@@ -39,19 +39,19 @@ func OpenNode(ctx context.Context, path string) (*Node, error) {
 
 	node.Repo, err = r.Lock(repo.FullNode)
 	if err != nil {
-		node.Close()
+		_ = node.Close()
 		return nil, err
 	}
 
 	node.Blockstore, err = node.Repo.Blockstore(ctx, repo.UniversalBlockstore)
 	if err != nil {
-		node.Close()
+		_ = node.Close()
 		return nil, err
 	}
 
 	node.MetadataDS, err = node.Repo.Datastore(ctx, "/metadata")
 	if err != nil {
-		node.Close()
+		_ = node.Close()
 		return nil, err
 	}
 
@@ -157,7 +157,9 @@ func (nd *Node) ListSims(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, xerrors.Errorf("failed to list simulations: %w", err)
 	}
-	defer items.Close()
+
+	defer func() { _ = items.Close() }()
+
 	var names []string
 	for {
 		select {
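Note: two further errcheck idioms appear above. Where an error is already on its way out (the `OpenNode` error paths), `Close`'s result is discarded explicitly with `_ =`; where a `defer` has no named return to route the error into (`ListSims`), the call is wrapped as `defer func() { _ = items.Close() }()`. Both tell the linter the drop is deliberate. A compact sketch with a hypothetical `handle` type:

package main

import (
	"errors"
	"fmt"
)

// handle is a hypothetical stand-in for the repo/blockstore/iterator handles.
type handle struct{}

func (handle) Close() error { return errors.New("close failed") }

func openThings() (*handle, error) {
	h := &handle{}
	if err := errors.New("lock failed"); err != nil {
		// Error path: the original error takes precedence, so Close's
		// result is discarded explicitly rather than silently.
		_ = h.Close()
		return nil, err
	}
	return h, nil
}

func listThings() []string {
	items := handle{}
	// Deferred variant: the explicit discard satisfies errcheck when there
	// is no named return to route the error into.
	defer func() { _ = items.Close() }()
	return nil
}

func main() {
	if _, err := openThings(); err != nil {
		fmt.Println(err) // "lock failed"
	}
	_ = listThings()
}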
@@ -16,7 +16,6 @@ import (
 
 	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
-
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/state"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -25,8 +24,6 @@ import (
 
 var log = logging.Logger("simulation")
 
-const onboardingProjectionLookback = 2 * 7 * builtin.EpochsInDay // lookback two weeks
-
 // config is the simulation's config, persisted to the local metadata store and loaded on start.
 //
 // See Simulation.loadConfig and Simulation.saveConfig.
@@ -16,7 +16,7 @@ type pendingCommitTracker map[address.Address]minerPendingCommits
 // minerPendingCommits tracks a miner's pending commits during a single epoch (grouped by seal proof type).
 type minerPendingCommits map[abi.RegisteredSealProof][]abi.SectorNumber
 
-// finish markes count sectors of the given proof type as "prove-committed".
+// finish marks count sectors of the given proof type as "prove-committed".
 func (m minerPendingCommits) finish(proof abi.RegisteredSealProof, count int) {
 	snos := m[proof]
 	if len(snos) < count {
@@ -68,7 +68,7 @@ func TestCommitQueue(t *testing.T) {
 	require.EqualValues(t, []abi.SectorNumber{1}, sectors[proofType])
 
 	// 1 : non-empty + non-empty
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	addr, sectors, ok = q.nextMiner()
 	require.True(t, ok)
@@ -79,13 +79,13 @@ func TestCommitQueue(t *testing.T) {
 	require.Equal(t, sectors.count(), 0)
 
 	// 2 : empty + empty
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	_, _, ok = q.nextMiner()
 	require.False(t, ok)
 
 	// 3 : empty + non-empty
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	_, sectors, ok = q.nextMiner()
 	require.True(t, ok)
@@ -93,7 +93,7 @@ func TestCommitQueue(t *testing.T) {
 	require.EqualValues(t, []abi.SectorNumber{4}, sectors[proofType])
 
 	// 4 : non-empty + non-empty
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	_, sectors, ok = q.nextMiner()
 	require.True(t, ok)
@@ -101,7 +101,7 @@ func TestCommitQueue(t *testing.T) {
 	require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType])
 
 	// 5 : empty + non-empty
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	_, sectors, ok = q.nextMiner()
 	require.True(t, ok)
@@ -111,7 +111,7 @@ func TestCommitQueue(t *testing.T) {
 	require.EqualValues(t, []abi.SectorNumber{5}, sectors[proofType])
 
 	// 6
-	epoch += 1
+	epoch++
 	q.advanceEpoch(epoch)
 	_, sectors, ok = q.nextMiner()
 	require.True(t, ok)
@@ -26,8 +26,9 @@ import (
 )
 
 const (
-	minPreCommitBatchSize = 1
-	maxPreCommitBatchSize = miner5.PreCommitSectorBatchMaxSize
+	minPreCommitBatchSize        = 1
+	maxPreCommitBatchSize        = miner5.PreCommitSectorBatchMaxSize
+	onboardingProjectionLookback = 2 * 7 * builtin.EpochsInDay // lookback two weeks
 )
 
 type PreCommitStage struct {
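Note: `onboardingProjectionLookback` is the constant deleted from the simulation package's root file in the `@@ -25,8 +24,6 @@` hunk above; it moves here next to its only user, and gofmt realigns the `=` signs across the grouped block. A sketch of the resulting block with placeholder values (assuming Filecoin's 30-second epochs, 86400/30 = 2880 per day; `miner5.PreCommitSectorBatchMaxSize` replaced by a stand-in):

package main

import "fmt"

// epochsInDay is a placeholder for builtin.EpochsInDay, assuming 30-second
// epochs: 86400/30 = 2880 epochs per day.
const epochsInDay = 2880

const (
	minPreCommitBatchSize        = 1
	maxPreCommitBatchSize        = 256                 // stand-in value
	onboardingProjectionLookback = 2 * 7 * epochsInDay // lookback two weeks
)

func main() {
	fmt.Println(onboardingProjectionLookback) // 40320 epochs
}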
@@ -89,7 +90,7 @@ func (stage *PreCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.
 	)
 
 	// We pre-commit for the top 1%, 10%, and the of the network 1/3rd of the time each.
-	// This won't yeild the most accurate distribution... but it'll give us a good
+	// This won't yield the most accurate distribution... but it'll give us a good
 	// enough distribution.
 	switch {
 	case (i%3) <= 0 && top1Miners < stage.top1.len():
@@ -237,7 +238,7 @@ func (stage *PreCommitStage) packMiner(
 		}
 	}
 	for _, info := range infos {
-		enc, err := actors.SerializeParams(&info)
+		enc, err := actors.SerializeParams(&info) //nolint
 		if err != nil {
 			return 0, false, err
 		}
@@ -261,7 +262,7 @@ func (stage *PreCommitStage) packMiner(
 	return added, false, nil
 }
 
-func (ps *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+func (stage *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
 	bb.L().Infow("loading miner power for pre-commits")
 	start := time.Now()
 	defer func() {
@@ -270,12 +271,12 @@ func (ps *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilde
 		}
 		bb.L().Infow("loaded miner power for pre-commits",
 			"duration", time.Since(start),
-			"top1", ps.top1.len(),
-			"top10", ps.top10.len(),
-			"rest", ps.rest.len(),
+			"top1", stage.top1.len(),
+			"top10", stage.top10.len(),
+			"rest", stage.rest.len(),
 		)
 	}()
-	lookbackEpoch := bb.Height() - (14 * builtin.EpochsInDay)
+	lookbackEpoch := bb.Height() - onboardingProjectionLookback
 	lookbackPowerTable, err := loadClaims(ctx, bb, lookbackEpoch)
 	if err != nil {
 		return xerrors.Errorf("failed to load claims from lookback epoch %d: %w", lookbackEpoch, err)
@@ -334,26 +335,26 @@ func (ps *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilde
 	})
 
 	// reset, just in case.
-	ps.top1 = actorIter{}
-	ps.top10 = actorIter{}
-	ps.rest = actorIter{}
+	stage.top1 = actorIter{}
+	stage.top10 = actorIter{}
+	stage.rest = actorIter{}
 
 	for i, oi := range sealList {
 		var dist *actorIter
 		if i < len(sealList)/100 {
-			dist = &ps.top1
+			dist = &stage.top1
 		} else if i < len(sealList)/10 {
-			dist = &ps.top10
+			dist = &stage.top10
 		} else {
-			dist = &ps.rest
+			dist = &stage.rest
 		}
 		dist.add(oi.addr)
 	}
 
-	ps.top1.shuffle()
-	ps.top10.shuffle()
-	ps.rest.shuffle()
+	stage.top1.shuffle()
+	stage.top10.shuffle()
+	stage.rest.shuffle()
 
-	ps.initialized = true
+	stage.initialized = true
 	return nil
 }
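Note: the `ps` → `stage` receiver rename in the two hunks above brings `load` in line with the type's other methods, such as `PackMessages` and `packMiner`, which already use `stage`; mixed receiver names on one type are flagged by linters (e.g. staticcheck's ST1016, "methods on the same type should have the same receiver name"). A toy illustration:

package main

// stageLike is a toy type illustrating the rule: every method on a type
// should use the same receiver name.
type stageLike struct{ initialized bool }

func (stage *stageLike) pack() {} // consistent: "stage"

func (stage *stageLike) load() { // was "(ps *PreCommitStage)" before the fix
	stage.initialized = true
}

func main() {
	s := &stageLike{}
	s.load()
	s.pack()
}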
@@ -288,7 +288,7 @@ func (stage *WindowPoStStage) tick(ctx context.Context, bb *blockbuilder.BlockBu
 	store := bb.ActorStore()
 
 	// Perform a bit of catch up. This lets us do things like skip blocks at upgrades then catch
-	// up to make the simualtion easier.
+	// up to make the simulation easier.
 	for ; stage.nextWpostEpoch <= targetHeight; stage.nextWpostEpoch++ {
 		if stage.nextWpostEpoch+miner.WPoStChallengeWindow < targetHeight {
 			bb.L().Warnw("skipping old window post", "deadline-open", stage.nextWpostEpoch)
@@ -17,6 +17,7 @@ var upgradeCommand = &cli.Command{
 	Description: "Modifies network upgrade heights.",
 	Subcommands: []*cli.Command{
 		upgradeSetCommand,
+		upgradeList,
 	},
 }
 
@@ -26,12 +27,16 @@ var upgradeList = &cli.Command{
 	Subcommands: []*cli.Command{
 		upgradeSetCommand,
 	},
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		node, err := open(cctx)
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {
@@ -61,7 +66,7 @@ var upgradeSetCommand = &cli.Command{
 	Name:        "set",
 	ArgsUsage:   "<network-version> [+]<epochs>",
 	Description: "Set a network upgrade height. Prefix with '+' to set it relative to the last epoch.",
-	Action: func(cctx *cli.Context) error {
+	Action: func(cctx *cli.Context) (err error) {
 		args := cctx.Args()
 		if args.Len() != 2 {
 			return fmt.Errorf("expected 2 arguments")
@@ -86,7 +91,11 @@ var upgradeSetCommand = &cli.Command{
 		if err != nil {
 			return err
 		}
-		defer node.Close()
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
 
 		sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
 		if err != nil {