Merge branch 'master' into nonsense/split-market-miner-processes

Anton Evangelatov 2021-06-08 14:54:59 +02:00
commit c0607a0e2a
10 changed files with 140 additions and 91 deletions


@@ -21,7 +21,7 @@ jobs:
     steps:
     - uses: actions/checkout@v2
     - name: testground run
-      uses: coryschwartz/testground-github-action@v1.0
+      uses: coryschwartz/testground-github-action@v1.1
       with:
        backend_addr: ${{ matrix.backend_addr }}
        backend_proto: ${{ matrix.backend_proto }}


@@ -248,24 +248,18 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 var errHaltExecution = fmt.Errorf("halt")

 func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) {
-    var outm *types.Message
-    var outr *vm.ApplyRet
+    var finder messageFinder
+    // message to find
+    finder.mcid = mcid

-    _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error {
-        if c == mcid {
-            outm = m
-            outr = ret
-            return errHaltExecution
-        }
-        return nil
-    })
+    _, _, err := sm.computeTipSetState(ctx, ts, &finder)
     if err != nil && !xerrors.Is(err, errHaltExecution) {
         return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
     }

-    if outr == nil {
+    if finder.outr == nil {
         return nil, nil, xerrors.Errorf("given message not found in tipset")
     }

-    return outm, outr, nil
+    return finder.outm, finder.outr, nil
 }


@@ -61,7 +61,7 @@ type MigrationCache interface {
 type MigrationFunc func(
     ctx context.Context,
     sm *StateManager, cache MigrationCache,
-    cb ExecCallback, oldState cid.Cid,
+    cb ExecMonitor, oldState cid.Cid,
     height abi.ChainEpoch, ts *types.TipSet,
 ) (newState cid.Cid, err error)

@@ -292,7 +292,7 @@ func (us UpgradeSchedule) Validate() error {
     return nil
 }

-func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
+func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
     retCid := root
     var err error
     u := sm.stateMigrations[height]

@@ -472,7 +472,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
     return nil
 }

-func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     // Some initial parameters
     FundsForMiners := types.FromFil(1_000_000)
     LookbackEpoch := abi.ChainEpoch(32000)

@@ -722,12 +722,12 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
         return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
     }

-    if cb != nil {
+    if em != nil {
         // record the transfer in execution traces
         fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
-        if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+        if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
             MessageReceipt: *makeFakeRct(),
             ActorErr:       nil,
             ExecutionTrace: types.ExecutionTrace{

@@ -740,7 +740,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
             },
             Duration: 0,
             GasCosts: nil,
-        }); err != nil {
+        }, false); err != nil {
             return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
         }
     }

@@ -748,7 +748,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
     return tree.Flush(ctx)
 }

-func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     store := sm.cs.ActorStore(ctx)

     if build.UpgradeLiftoffHeight <= epoch {

@@ -785,12 +785,12 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
         return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
     }

-    err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch)
+    err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
     if err != nil {
         return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
     }

-    err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch)
+    err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
     if err != nil {
         return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
     }

@@ -803,7 +803,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
     return tree.Flush(ctx)
 }

-func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     store := sm.cs.ActorStore(ctx)

     tree, err := sm.StateTree(root)

@@ -829,7 +829,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
     return tree.Flush(ctx)
 }

-func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
     store := store.ActorStore(ctx, buf)

@@ -875,7 +875,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb
     return newRoot, nil
 }

-func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     tree, err := sm.StateTree(root)
     if err != nil {
         return cid.Undef, xerrors.Errorf("getting state tree: %w", err)

@@ -889,7 +889,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb
     return tree.Flush(ctx)
 }

-func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     if build.BuildType != build.BuildMainnet {
         return root, nil
     }

@@ -935,7 +935,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
     return newRoot, nil
 }

-func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, cb ExecCallback, epoch abi.ChainEpoch) error {
+func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
     a, err := tree.GetActor(addr)
     if xerrors.Is(err, types.ErrActorNotFound) {
         return types.ErrActorNotFound

@@ -950,18 +950,18 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
         return xerrors.Errorf("transferring terminated actor's balance: %w", err)
     }

-    if cb != nil {
+    if em != nil {
         // record the transfer in execution traces
         fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
-        if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+        if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
             MessageReceipt: *makeFakeRct(),
             ActorErr:       nil,
             ExecutionTrace: trace,
             Duration:       0,
             GasCosts:       nil,
-        }); err != nil {
+        }, false); err != nil {
             return xerrors.Errorf("recording transfers: %w", err)
         }
     }

@@ -995,7 +995,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
     return tree.SetActor(init_.Address, ia)
 }

-func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     // Use all the CPUs except 3.
     workerCount := runtime.NumCPU() - 3
     if workerCount <= 0 {

@@ -1019,7 +1019,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
     }

     if build.BuildType == build.BuildMainnet {
-        err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch)
+        err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
         if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
             return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
         }

@@ -1097,7 +1097,7 @@ func upgradeActorsV3Common(
     return newRoot, nil
 }

-func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     // Use all the CPUs except 3.
     workerCount := runtime.NumCPU() - 3
     if workerCount <= 0 {

@@ -1183,7 +1183,7 @@ func upgradeActorsV4Common(
     return newRoot, nil
 }

-func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
     // Use all the CPUs except 3.
     workerCount := runtime.NumCPU() - 3
     if workerCount <= 0 {

@@ -1296,7 +1296,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
     return nil
 }

-func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error {
+func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
     if portions < 1 {
         return xerrors.Errorf("cannot split into 0 portions")
     }

@@ -1393,12 +1393,12 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
         i++
     }

-    if cb != nil {
+    if em != nil {
         // record the transfer in execution traces
         fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
-        if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+        if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
             MessageReceipt: *makeFakeRct(),
             ActorErr:       nil,
             ExecutionTrace: types.ExecutionTrace{

@@ -1411,7 +1411,7 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
             },
             Duration: 0,
             GasCosts: nil,
-        }); err != nil {
+        }, false); err != nil {
             return xerrors.Errorf("recording transfers: %w", err)
         }
     }


@@ -123,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) {
     cg.ChainStore(), UpgradeSchedule{{
         Network: 1,
         Height:  testForkHeight,
-        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
+        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
             root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {

             cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore())

@@ -253,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) {
         Network:   1,
         Expensive: true,
         Height:    testForkHeight,
-        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
+        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
             root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
             return root, nil
         }}})

@@ -363,7 +363,7 @@ func TestForkPreMigration(t *testing.T) {
     cg.ChainStore(), UpgradeSchedule{{
         Network: 1,
         Height:  testForkHeight,
-        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
+        Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
             root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {

             // Make sure the test that should be canceled, is canceled.


@@ -103,6 +103,8 @@ type StateManager struct {
     genesisPledge      abi.TokenAmount
     genesisMarketFunds abi.TokenAmount
+
+    tsExecMonitor ExecMonitor
 }

 // Caches a single state tree

@@ -171,6 +173,15 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
     }, nil
 }

+func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
+    sm, err := NewStateManagerWithUpgradeSchedule(cs, us)
+    if err != nil {
+        return nil, err
+    }
+
+    sm.tsExecMonitor = em
+    return sm, nil
+}
+
 func cidsToKey(cids []cid.Cid) string {
     var out string
     for _, c := range cids {

@@ -255,7 +266,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
         return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
     }

-    st, rec, err = sm.computeTipSetState(ctx, ts, nil)
+    st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor)
     if err != nil {
         return cid.Undef, cid.Undef, err
     }

@@ -263,39 +274,21 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
     return st, rec, nil
 }

-func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
-    return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
-        ir := &api.InvocResult{
-            MsgCid:         mcid,
-            Msg:            msg,
-            MsgRct:         &ret.MessageReceipt,
-            ExecutionTrace: ret.ExecutionTrace,
-            Duration:       ret.Duration,
-        }
-        if ret.ActorErr != nil {
-            ir.Error = ret.ActorErr.Error()
-        }
-        if ret.GasCosts != nil {
-            ir.GasCost = MakeMsgGasCost(msg, ret)
-        }
-        *trace = append(*trace, ir)
-        return nil
-    }
+func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
+    st, _, err := sm.computeTipSetState(ctx, ts, em)
+    return st, err
 }

 func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
-    var trace []*api.InvocResult
-    st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace))
+    var invocTrace []*api.InvocResult
+    st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
     if err != nil {
         return cid.Undef, nil, err
     }
-
-    return st, trace, nil
+    return st, invocTrace, nil
 }

-type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
-
-func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
+func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
     done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
     defer done()

@@ -341,8 +334,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
             if err != nil {
                 return err
             }
-            if cb != nil {
-                if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil {
+            if em != nil {
+                if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
                     return xerrors.Errorf("callback failed on cron message: %w", err)
                 }
             }

@@ -368,7 +361,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp

         // handle state forks
         // XXX: The state tree
-        newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts)
+        newState, err := sm.handleStateForks(ctx, pstate, i, em, ts)
         if err != nil {
             return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
         }

@@ -407,8 +400,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
             gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
             penalty = big.Add(penalty, r.GasCosts.MinerPenalty)

-            if cb != nil {
-                if err := cb(cm.Cid(), m, r); err != nil {
+            if em != nil {
+                if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
                     return cid.Undef, cid.Undef, err
                 }
             }

@@ -440,8 +433,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
         if actErr != nil {
             return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
         }
-        if cb != nil {
-            if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil {
+        if em != nil {
+            if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
                 return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
             }
         }

@@ -483,7 +476,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
     return st, rectroot, nil
 }

-func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) {
+func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) {
     ctx, span := trace.StartSpan(ctx, "computeTipSetState")
     defer span.End()

@@ -519,7 +512,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
     baseFee := blks[0].ParentBaseFee

-    return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
+    return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts)
 }

 func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
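Aside (not part of the commit): the new constructor lets a caller attach a monitor that observes every message applied while computing a tipset's state, including implicit cron and reward messages. A minimal sketch, assuming an existing ChainStore, UpgradeSchedule, and some ExecMonitor implementation such as the countingMonitor sketched after tracers.go below; the example package and function name are hypothetical.

package example // illustrative only

import (
    "context"

    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/ipfs/go-cid"
)

// tipSetStateWithMonitor wires an ExecMonitor in at construction time so that
// TipSetState reports each applied message to it via MessageApplied.
func tipSetStateWithMonitor(ctx context.Context, cs *store.ChainStore, us stmgr.UpgradeSchedule, mon stmgr.ExecMonitor, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
    sm, err := stmgr.NewStateManagerWithUpgradeScheduleAndMonitor(cs, us, mon)
    if err != nil {
        return cid.Undef, cid.Undef, err
    }
    // Returns the tipset's state root and receipts root; mon has by now seen
    // every explicit and implicit message applied along the way.
    return sm.TipSetState(ctx, ts)
}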

chain/stmgr/tracers.go (new file)

@@ -0,0 +1,56 @@
+package stmgr
+
+import (
+    "context"
+
+    "github.com/filecoin-project/lotus/api"
+    "github.com/filecoin-project/lotus/chain/types"
+    "github.com/filecoin-project/lotus/chain/vm"
+    "github.com/ipfs/go-cid"
+)
+
+type ExecMonitor interface {
+    // MessageApplied is called after a message has been applied. Returning an error will halt execution of any further messages.
+    MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error
+}
+
+var _ ExecMonitor = (*InvocationTracer)(nil)
+
+type InvocationTracer struct {
+    trace *[]*api.InvocResult
+}
+
+func (i *InvocationTracer) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+    ir := &api.InvocResult{
+        MsgCid:         mcid,
+        Msg:            msg,
+        MsgRct:         &ret.MessageReceipt,
+        ExecutionTrace: ret.ExecutionTrace,
+        Duration:       ret.Duration,
+    }
+    if ret.ActorErr != nil {
+        ir.Error = ret.ActorErr.Error()
+    }
+    if ret.GasCosts != nil {
+        ir.GasCost = MakeMsgGasCost(msg, ret)
+    }
+    *i.trace = append(*i.trace, ir)
+    return nil
+}
+
+var _ ExecMonitor = (*messageFinder)(nil)
+
+type messageFinder struct {
+    mcid cid.Cid // the message cid to find
+    outm *types.Message
+    outr *vm.ApplyRet
+}
+
+func (m *messageFinder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+    if m.mcid == mcid {
+        m.outm = msg
+        m.outr = ret
+        return errHaltExecution // message was found, no need to continue
+    }
+    return nil
+}
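Aside (not part of the commit): any type with a matching MessageApplied method can serve as an ExecMonitor, so callers are not limited to the InvocationTracer and messageFinder above. A minimal sketch of a custom monitor; the countingMonitor name and the example package are hypothetical.

package example // illustrative only

import (
    "context"

    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/vm"
    "github.com/ipfs/go-cid"
)

// countingMonitor tallies applied messages, distinguishing explicit block
// messages from implicit ones (cron and reward messages).
type countingMonitor struct {
    explicit, implicit int
}

var _ stmgr.ExecMonitor = (*countingMonitor)(nil)

func (c *countingMonitor) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
    if implicit {
        c.implicit++
    } else {
        c.explicit++
    }
    return nil // a non-nil error would halt execution of further messages
}

Such a monitor can be attached via NewStateManagerWithUpgradeScheduleAndMonitor or passed directly to ExecutionTraceWithMonitor, both introduced in this change.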


@@ -342,7 +342,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
     for i := ts.Height(); i < height; i++ {
         // handle state forks
-        base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
+        base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
         if err != nil {
             return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
         }


@@ -267,7 +267,7 @@ func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error {
     proof := info.Proof
     seed := []byte(info.InteractiveRandomness)

-    log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
+    log.Debugf("Verif r:%s; d:%s; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)

     //func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber)
     ok, err := ss.verifier.VerifySeal(info)


@@ -345,7 +345,7 @@ var StateSectorsCmd = &cli.Command{
         }

         for _, s := range sectors {
-            fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
+            fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
         }

         return nil

@@ -385,7 +385,7 @@ var StateActiveSectorsCmd = &cli.Command{
         }

         for _, s := range sectors {
-            fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
+            fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
         }

         return nil


@@ -141,16 +141,11 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
         blocks = append(blocks, sb)
     }

-    var (
-        messages []*types.Message
-        results  []*vm.ApplyRet
-    )
-
-    recordOutputs := func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
-        messages = append(messages, msg)
-        results = append(results, ret)
-        return nil
+    recordOutputs := &outputRecorder{
+        messages: []*types.Message{},
+        results:  []*vm.ApplyRet{},
     }

     postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(),
         params.ParentEpoch,
         params.Preroot,

@@ -169,8 +164,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
     ret := &ExecuteTipsetResult{
         ReceiptsRoot:    receiptsroot,
         PostStateRoot:   postcid,
-        AppliedMessages: messages,
-        AppliedResults:  results,
+        AppliedMessages: recordOutputs.messages,
+        AppliedResults:  recordOutputs.results,
     }
     return ret, nil
 }

@@ -284,3 +279,14 @@ func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount {
     }
     return big.NewFromGo(circSupply)
 }
+
+type outputRecorder struct {
+    messages []*types.Message
+    results  []*vm.ApplyRet
+}
+
+func (o *outputRecorder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+    o.messages = append(o.messages, msg)
+    o.results = append(o.results, ret)
+    return nil
+}