Merge pull request #9654 from filecoin-project/gstuart/gas-estimation-tooling

feat: cli: gas estimation tooling
Aayush Rajasekaran 2022-11-22 09:55:19 -05:00 committed by GitHub
commit 6067968c07
9 changed files with 453 additions and 217 deletions


@ -69,6 +69,10 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
},
DrandIncentinet: {
Servers: []string{
"https://dev1.drand.sh",
"https://dev2.drand.sh",
},
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
},
}


@ -107,7 +107,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
client, err := dclient.Wrap(clients, opts...)
if err != nil {
return nil, xerrors.Errorf("creating drand client")
return nil, xerrors.Errorf("creating drand client: %w", err)
}
lc, err := lru.New(1024)


@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore"
@ -29,74 +30,7 @@ var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at e
// Call applies the given message to the given tipset's parent state, at the epoch following the
// tipset's parent. In the presence of null blocks, the height at which the message is invoked may
// be less than the specified tipset.
//
// - If no tipset is specified, the first tipset without an expensive migration is used.
// - If executing a message at a given tipset would trigger an expensive migration, the call will
// fail with ErrExpensiveFork.
func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) {
ctx, span := trace.StartSpan(ctx, "statemanager.Call")
defer span.End()
var pheight abi.ChainEpoch = -1
// If no tipset is provided, try to find one without a fork.
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
// Search back till we find a height with no fork, or we reach the beginning.
for ts.Height() > 0 {
pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
}
if !sm.hasExpensiveFork(pts.Height()) {
pheight = pts.Height()
break
}
ts = pts
}
} else if ts.Height() > 0 {
pts, err := sm.cs.LoadTipSet(ctx, ts.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
}
pheight = pts.Height()
if sm.hasExpensiveFork(pheight) {
return nil, ErrExpensiveFork
}
} else {
// We can't get the parent tipset in this case.
pheight = ts.Height() - 1
}
// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := pheight + 1
bstate := ts.ParentState()
// Run the (not expensive) migration.
bstate, err := sm.HandleStateForks(ctx, bstate, pheight, nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
vmopt := &vm.VMOpts{
StateBase: bstate,
Epoch: vmHeight,
Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion),
Bstore: sm.cs.StateBlockstore(),
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1),
BaseFee: types.NewInt(0),
LookbackState: LookbackStateGetterForTipset(sm, ts),
Tracing: true,
}
vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return nil, xerrors.Errorf("failed to set up vm: %w", err)
}
if msg.GasLimit == 0 {
msg.GasLimit = build.BlockGasLimit
}
@ -106,61 +40,43 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
if msg.GasPremium == types.EmptyInt {
msg.GasPremium = types.NewInt(0)
}
if msg.Value == types.EmptyInt {
msg.Value = types.NewInt(0)
}
if span.IsRecordingEvents() {
span.AddAttributes(
trace.Int64Attribute("gas_limit", msg.GasLimit),
trace.StringAttribute("gas_feecap", msg.GasFeeCap.String()),
trace.StringAttribute("value", msg.Value.String()),
)
}
stTree, err := sm.StateTree(bstate)
if err != nil {
return nil, xerrors.Errorf("failed to load state tree: %w", err)
}
fromActor, err := stTree.GetActor(msg.From)
if err != nil {
return nil, xerrors.Errorf("call raw get actor: %s", err)
}
msg.Nonce = fromActor.Nonce
// TODO: maybe just use the invoker directly?
ret, err := vmi.ApplyImplicitMessage(ctx, msg)
if err != nil && ret == nil {
return nil, xerrors.Errorf("apply message failed: %w", err)
}
var errs string
if ret.ActorErr != nil {
errs = ret.ActorErr.Error()
log.Warnf("chain call failed: %s", ret.ActorErr)
}
return &api.InvocResult{
MsgCid: msg.Cid(),
Msg: msg,
MsgRct: &ret.MessageReceipt,
ExecutionTrace: ret.ExecutionTrace,
Error: errs,
Duration: ret.Duration,
}, err
return sm.callInternal(ctx, msg, nil, ts, cid.Undef, sm.GetNetworkVersion, false)
}
// CallWithGas calculates the state for a given tipset, and then applies the given message on top of that state.
func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*api.InvocResult, error) {
ctx, span := trace.StartSpan(ctx, "statemanager.CallWithGas")
return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true)
}
// CallAtStateAndVersion allows you to specify a message to execute on the given stateCid and network version.
// This should mostly be used for gas modelling on a migrated state.
// A tipset is not needed because the stateCid and network version fully describe the execution we want. The internal
// function will fall back to the heaviest tipset for things like the basefee, which we don't really care about here.
func (sm *StateManager) CallAtStateAndVersion(ctx context.Context, msg *types.Message, stateCid cid.Cid, v network.Version) (*api.InvocResult, error) {
nvGetter := func(context.Context, abi.ChainEpoch) network.Version {
return v
}
return sm.callInternal(ctx, msg, nil, nil, stateCid, nvGetter, true)
}
// - If no tipset is specified, the first tipset with no expensive migration at it or its parent is used.
// - If executing a message at a given tipset or its parent would trigger an expensive migration, the call will
// fail with ErrExpensiveFork.
func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, stateCid cid.Cid, nvGetter rand.NetworkVersionGetter, checkGas bool) (*api.InvocResult, error) {
ctx, span := trace.StartSpan(ctx, "statemanager.callInternal")
defer span.End()
// Copy the message as we'll be modifying the nonce.
msgCopy := *msg
msg = &msgCopy
var err error
var pts *types.TipSet
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
@ -170,10 +86,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
// height to have no fork, because we'll run it inside this
// function before executing the given message.
for ts.Height() > 0 {
pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents())
pts, err = sm.cs.GetTipSetFromKey(ctx, ts.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
}
// Check for expensive forks from the parent height through the tipset's height (inclusive), covering any null epochs in between
if !sm.hasExpensiveForkBetween(pts.Height(), ts.Height()+1) {
break
}
@ -181,7 +98,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
ts = pts
}
} else if ts.Height() > 0 {
pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents())
pts, err = sm.cs.GetTipSetFromKey(ctx, ts.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
}
@ -190,12 +107,22 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
}
}
// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := ts.Height() + 1
stateCid, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state: %w", err)
var vmHeight abi.ChainEpoch
if checkGas {
// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight = ts.Height() + 1
if stateCid == cid.Undef {
stateCid, _, err = sm.TipSetState(ctx, ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state: %w", err)
}
}
} else {
// If we're not checking gas, we don't need to compute the tipset state as above, which saves a lot of computation time
vmHeight = pts.Height() + 1
if stateCid == cid.Undef {
stateCid = ts.ParentState()
}
}
// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
@ -204,8 +131,6 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion)
if span.IsRecordingEvents() {
span.AddAttributes(
trace.Int64Attribute("gas_limit", msg.GasLimit),
@ -218,12 +143,12 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
vmopt := &vm.VMOpts{
StateBase: stateCid,
Epoch: vmHeight,
Rand: r,
Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, nvGetter),
Bstore: buffStore,
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1),
NetworkVersion: nvGetter(ctx, vmHeight),
BaseFee: ts.Blocks()[0].ParentBaseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
Tracing: true,
@ -233,7 +158,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
return nil, xerrors.Errorf("failed to set up vm: %w", err)
}
for i, m := range priorMsgs {
_, err := vmi.ApplyMessage(ctx, m)
_, err = vmi.ApplyMessage(ctx, m)
if err != nil {
return nil, xerrors.Errorf("applying prior message (%d, %s): %w", i, m.Cid(), err)
}
@ -258,27 +183,6 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
msg.Nonce = fromActor.Nonce
fromKey, err := sm.ResolveToKeyAddress(ctx, msg.From, ts)
if err != nil {
return nil, xerrors.Errorf("could not resolve key: %w", err)
}
var msgApply types.ChainMsg
switch fromKey.Protocol() {
case address.BLS:
msgApply = msg
case address.SECP256K1:
msgApply = &types.SignedMessage{
Message: *msg,
Signature: crypto.Signature{
Type: crypto.SigTypeSecp256k1,
Data: make([]byte, 65),
},
}
}
// If the fee cap is set to zero, make gas free.
if msg.GasFeeCap.NilOrZero() {
// Now estimate with a new VM with no base fee.
@ -291,9 +195,39 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
}
}
ret, err := vmi.ApplyMessage(ctx, msgApply)
if err != nil {
return nil, xerrors.Errorf("gas estimation failed: %w", err)
var ret *vm.ApplyRet
var gasInfo api.MsgGasCost
if checkGas {
fromKey, err := sm.ResolveToKeyAddress(ctx, msg.From, ts)
if err != nil {
return nil, xerrors.Errorf("could not resolve key: %w", err)
}
var msgApply types.ChainMsg
switch fromKey.Protocol() {
case address.BLS:
msgApply = msg
case address.SECP256K1:
msgApply = &types.SignedMessage{
Message: *msg,
Signature: crypto.Signature{
Type: crypto.SigTypeSecp256k1,
Data: make([]byte, 65),
},
}
}
ret, err = vmi.ApplyMessage(ctx, msgApply)
if err != nil {
return nil, xerrors.Errorf("gas estimation failed: %w", err)
}
gasInfo = MakeMsgGasCost(msg, ret)
} else {
ret, err = vmi.ApplyImplicitMessage(ctx, msg)
if err != nil && ret == nil {
return nil, xerrors.Errorf("apply message failed: %w", err)
}
}
var errs string
@ -305,11 +239,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
MsgCid: msg.Cid(),
Msg: msg,
MsgRct: &ret.MessageReceipt,
GasCost: MakeMsgGasCost(msg, ret),
GasCost: gasInfo,
ExecutionTrace: ret.ExecutionTrace,
Error: errs,
Duration: ret.Duration,
}, nil
}, err
}
var errHaltExecution = fmt.Errorf("halt")
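The refactor funnels Call, CallWithGas, and the new CallAtStateAndVersion through a single callInternal, with checkGas selecting between implicit application (Call) and full gas accounting (the other two). As a rough illustration of the new entry point, here is a minimal sketch, not from this PR, of gas modelling on a migrated state; the helper name gasAtMigratedState and its wiring are assumptions.

// Hypothetical helper sketch; assumes an initialized *stmgr.StateManager.
package main

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

// gasAtMigratedState replays msg on top of a migrated state root at a pinned
// network version and returns the gas used. CallAtStateAndVersion calls
// callInternal with checkGas enabled, so the message is applied with full
// gas accounting rather than as an implicit message.
func gasAtMigratedState(ctx context.Context, sm *stmgr.StateManager, msg *types.Message, stateRoot cid.Cid, nv network.Version) (int64, error) {
	res, err := sm.CallAtStateAndVersion(ctx, msg, stateRoot, nv)
	if err != nil {
		return 0, err
	}
	return res.MsgRct.GasUsed, nil
}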


@ -335,7 +335,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
parentHeight := pts.Height()
currentHeight := ts.TipSet.TipSet().Height()
// CallWithGas calls _at_ the current tipset.
// CallWithGas calls on top of the given tipset.
ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet())
if parentHeight <= testForkHeight && currentHeight >= testForkHeight {
// If I had a fork, or I _will_ have a fork, it should fail.
@ -347,7 +347,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
// Call always applies the message to the "next block" after the tipset's parent state.
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet())
if parentHeight == testForkHeight {
if parentHeight <= testForkHeight && currentHeight >= testForkHeight {
require.Equal(t, ErrExpensiveFork, err)
} else {
require.NoError(t, err)


@ -42,6 +42,25 @@ type Loc struct {
Function string
}
func (et ExecutionTrace) SumGas() GasTrace {
return SumGas(et.GasCharges)
}
func SumGas(charges []*GasTrace) GasTrace {
var out GasTrace
for _, gc := range charges {
out.TotalGas += gc.TotalGas
out.ComputeGas += gc.ComputeGas
out.StorageGas += gc.StorageGas
out.TotalVirtualGas += gc.TotalVirtualGas
out.VirtualComputeGas += gc.VirtualComputeGas
out.VirtualStorageGas += gc.VirtualStorageGas
}
return out
}
func (l Loc) Show() bool {
ignorePrefix := []string{
"reflect.",


@ -1321,7 +1321,7 @@ var compStateMsg = `
{{end}}
</td></tr>
{{end}}
{{with SumGas .GasCharges}}
{{with sumGas .GasCharges}}
<tr class="sum"><td><b>Sum</b></td>
{{template "gasC" .}}
<td>{{if PrintTiming}}{{.TimeTaken}}{{end}}</td>
@ -1355,7 +1355,7 @@ func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOut
"IsSlow": isSlow,
"IsVerySlow": isVerySlow,
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
"SumGas": sumGas,
"sumGas": types.SumGas,
"CodeStr": codeStr,
"Call": call,
"PrintTiming": func() bool { return printTiming },
@ -1423,21 +1423,6 @@ func isVerySlow(t time.Duration) bool {
return t > 50*time.Millisecond
}
func sumGas(changes []*types.GasTrace) types.GasTrace {
var out types.GasTrace
for _, gc := range changes {
out.TotalGas += gc.TotalGas
out.ComputeGas += gc.ComputeGas
out.StorageGas += gc.StorageGas
out.TotalVirtualGas += gc.TotalVirtualGas
out.VirtualComputeGas += gc.VirtualComputeGas
out.VirtualStorageGas += gc.VirtualStorageGas
}
return out
}
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
p, err := stmgr.GetParamType(filcns.NewActorRegistry(), code, method) // todo use api for correct actor registry
if err != nil {


@ -0,0 +1,277 @@
package main
import (
"context"
"fmt"
"io"
"os"
"strconv"
"text/tabwriter"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
const MAINNET_GENESIS_TIME = 1598306400
// USAGE: Sync a node, then call migrate-nv17 on some old state. Pass in the CID of the migrated state root,
// the network version you migrated to, and a message CID. You will be able to replay any message from between the
// migration epoch and the head your node originally synced to. Note: you may run into issues with state that changed
// between the epoch you migrated at and when the message was originally processed. This can be avoided by replaying
// messages from close to the migration epoch, or circumvented by using a custom FVM bundle.
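// Illustrative invocation (hypothetical, CIDs elided), assuming a state root
// migrated to nv17:
//
//	lotus-shed trace-gas --repo ~/.lotus <migratedStateRootCid> 17 <messageCid>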
var gasTraceCmd = &cli.Command{
Name: "trace-gas",
Description: "replay a message on the specified stateRoot and network version to get an execution trace",
ArgsUsage: "[migratedStateRootCid networkVersion messageCid]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
if cctx.NArg() != 3 {
return lcli.IncorrectNumArgs(cctx)
}
stateRootCid, err := cid.Decode(cctx.Args().Get(0))
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
nv, err := strconv.ParseInt(cctx.Args().Get(1), 10, 32)
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
messageCid, err := cid.Decode(cctx.Args().Get(2))
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
fsrepo, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return err
}
lkrepo, err := fsrepo.Lock(repo.FullNode)
if err != nil {
return err
}
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
dcs := build.DrandConfigSchedule()
shd := beacon.Schedule{}
for _, dc := range dcs {
bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
if err != nil {
return xerrors.Errorf("creating drand beacon: %w", err)
}
shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd)
if err != nil {
return err
}
msg, err := cs.GetMessage(ctx, messageCid)
if err != nil {
return err
}
// Set to block limit so message will not run out of gas
msg.GasLimit = build.BlockGasLimit
err = cs.Load(ctx)
if err != nil {
return err
}
tw := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', tabwriter.AlignRight)
res, err := sm.CallAtStateAndVersion(ctx, msg, stateRootCid, network.Version(nv))
if err != nil {
return err
}
fmt.Println("Total gas used: ", res.MsgRct.GasUsed)
printInternalExecutions(0, []types.ExecutionTrace{res.ExecutionTrace}, tw)
return tw.Flush()
},
}
var replayOfflineCmd = &cli.Command{
Name: "replay-offline",
Description: "replay a message to get a gas trace",
ArgsUsage: "[messageCid]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
&cli.Int64Flag{
Name: "lookback-limit",
Value: 10000,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}
messageCid, err := cid.Decode(cctx.Args().Get(0))
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
lookbackLimit := cctx.Int("lookback-limit")
fsrepo, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return err
}
lkrepo, err := fsrepo.Lock(repo.FullNode)
if err != nil {
return err
}
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
dcs := build.DrandConfigSchedule()
shd := beacon.Schedule{}
for _, dc := range dcs {
bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
if err != nil {
return xerrors.Errorf("creating drand beacon: %w", err)
}
shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd)
if err != nil {
return err
}
msg, err := cs.GetMessage(ctx, messageCid)
if err != nil {
return err
}
err = cs.Load(ctx)
if err != nil {
return err
}
ts, _, _, err := sm.SearchForMessage(ctx, cs.GetHeaviestTipSet(), messageCid, abi.ChainEpoch(lookbackLimit), true)
if err != nil {
return err
}
if ts == nil {
return xerrors.Errorf("could not find message within the last %d epochs", lookbackLimit)
}
executionTs, err := cs.GetTipsetByHeight(ctx, ts.Height()-2, ts, true)
if err != nil {
return err
}
tw := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', tabwriter.AlignRight)
res, err := sm.CallWithGas(ctx, msg, []types.ChainMsg{}, executionTs)
if err != nil {
return err
}
fmt.Println("Total gas used: ", res.MsgRct.GasUsed)
printInternalExecutions(0, []types.ExecutionTrace{res.ExecutionTrace}, tw)
return tw.Flush()
},
}
func printInternalExecutions(depth int, trace []types.ExecutionTrace, tw *tabwriter.Writer) {
if depth == 0 {
_, _ = fmt.Fprintf(tw, "Depth\tFrom\tTo\tMethod\tTotalGas\tComputeGas\tStorageGas\t\tExitCode\n")
}
for _, im := range trace {
sumGas := im.SumGas()
_, _ = fmt.Fprintf(tw, "%d\t%s\t%s\t%d\t%d\t%d\t%d\t\t%d\n", depth, truncateString(im.Msg.From.String(), 10), truncateString(im.Msg.To.String(), 10), im.Msg.Method, sumGas.TotalGas, sumGas.ComputeGas, sumGas.StorageGas, im.MsgRct.ExitCode)
printInternalExecutions(depth+1, im.Subcalls, tw)
}
}
func truncateString(str string, length int) string {
if len(str) <= length {
return str
}
truncated := ""
count := 0
for _, char := range str {
truncated += string(char)
count++
if count >= length {
break
}
}
truncated += "..."
return truncated
}
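Taken together, the two commands cover complementary workflows: trace-gas replays a message against an explicitly migrated state root at a pinned network version, while replay-offline searches the local chain for the message (up to --lookback-limit epochs back from the head) and replays it on a tipset just before its original execution, e.g. a hypothetical `lotus-shed replay-offline --lookback-limit 20000 <messageCid>`.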


@ -13,7 +13,10 @@ import (
var log = logging.Logger("lotus-shed")
func main() {
logging.SetLogLevel("*", "INFO")
_ = logging.SetLogLevel("*", "INFO")
_ = logging.SetLogLevelRegex("badger*", "ERROR")
_ = logging.SetLogLevel("drand", "ERROR")
_ = logging.SetLogLevel("chainstore", "ERROR")
local := []*cli.Command{
addressCmd,
@ -76,6 +79,8 @@ func main() {
msigCmd,
fip36PollCmd,
invariantsCmd,
gasTraceCmd,
replayOfflineCmd,
}
app := &cli.App{
@ -108,7 +113,7 @@ func main() {
}
if err := app.Run(os.Args); err != nil {
log.Warnf("%+v", err)
log.Errorf("%+v", err)
os.Exit(1)
return
}


@ -7,6 +7,7 @@ import (
"time"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
@ -53,6 +54,9 @@ var migrationsCmd = &cli.Command{
Name: "repo",
Value: "~/.lotus",
},
&cli.BoolFlag{
Name: "skip-pre-migration",
},
&cli.BoolFlag{
Name: "check-invariants",
},
@ -60,6 +64,11 @@ var migrationsCmd = &cli.Command{
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
err := logging.SetLogLevelRegex("badger*", "ERROR")
if err != nil {
return err
}
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}
@ -107,8 +116,6 @@ var migrationsCmd = &cli.Command{
return err
}
cache := nv15.NewMemMigrationCache()
blk, err := cs.GetBlock(ctx, blkCid)
if err != nil {
return err
@ -119,45 +126,8 @@ var migrationsCmd = &cli.Command{
return err
}
ts1, err := cs.GetTipsetByHeight(ctx, blk.Height-240, migrationTs, false)
if err != nil {
return err
}
startTime := time.Now()
err = filcns.PreUpgradeActorsV9(ctx, sm, cache, ts1.ParentState(), ts1.Height()-1, ts1)
if err != nil {
return err
}
preMigration1Time := time.Since(startTime)
ts2, err := cs.GetTipsetByHeight(ctx, blk.Height-15, migrationTs, false)
if err != nil {
return err
}
startTime = time.Now()
err = filcns.PreUpgradeActorsV9(ctx, sm, cache, ts2.ParentState(), ts2.Height()-1, ts2)
if err != nil {
return err
}
preMigration2Time := time.Since(startTime)
startTime = time.Now()
newCid1, err := filcns.UpgradeActorsV9(ctx, sm, cache, nil, blk.ParentStateRoot, blk.Height-1, migrationTs)
if err != nil {
return err
}
cachedMigrationTime := time.Since(startTime)
startTime = time.Now()
newCid2, err := filcns.UpgradeActorsV9(ctx, sm, nv15.NewMemMigrationCache(), nil, blk.ParentStateRoot, blk.Height-1, migrationTs)
if err != nil {
return err
@ -165,18 +135,60 @@ var migrationsCmd = &cli.Command{
uncachedMigrationTime := time.Since(startTime)
if newCid1 != newCid2 {
return xerrors.Errorf("got different results with and without the cache: %s, %s", newCid1,
newCid2)
}
fmt.Println("migration height ", blk.Height-1)
fmt.Println("old cid ", blk.ParentStateRoot)
fmt.Println("new cid ", newCid2)
fmt.Println("completed premigration 1, took ", preMigration1Time)
fmt.Println("completed premigration 2, took ", preMigration2Time)
fmt.Println("completed round actual (with cache), took ", cachedMigrationTime)
fmt.Println("completed round actual (without cache), took ", uncachedMigrationTime)
if !cctx.IsSet("skip-pre-migration") {
cache := nv15.NewMemMigrationCache()
ts1, err := cs.GetTipsetByHeight(ctx, blk.Height-240, migrationTs, false)
if err != nil {
return err
}
startTime = time.Now()
err = filcns.PreUpgradeActorsV9(ctx, sm, cache, ts1.ParentState(), ts1.Height()-1, ts1)
if err != nil {
return err
}
preMigration1Time := time.Since(startTime)
ts2, err := cs.GetTipsetByHeight(ctx, blk.Height-15, migrationTs, false)
if err != nil {
return err
}
startTime = time.Now()
err = filcns.PreUpgradeActorsV9(ctx, sm, cache, ts2.ParentState(), ts2.Height()-1, ts2)
if err != nil {
return err
}
preMigration2Time := time.Since(startTime)
startTime = time.Now()
newCid1, err := filcns.UpgradeActorsV9(ctx, sm, cache, nil, blk.ParentStateRoot, blk.Height-1, migrationTs)
if err != nil {
return err
}
cachedMigrationTime := time.Since(startTime)
if newCid1 != newCid2 {
return xerrors.Errorf("got different results with and without the cache: %s, %s", newCid1,
newCid2)
}
fmt.Println("completed premigration 1, took ", preMigration1Time)
fmt.Println("completed premigration 2, took ", preMigration2Time)
fmt.Println("completed round actual (with cache), took ", cachedMigrationTime)
}
if cctx.Bool("check-invariants") {
err = checkMigrationInvariants(ctx, blk.ParentStateRoot, newCid2, bs, blk.Height-1)
if err != nil {