address review comments

vyzo 2023-03-24 15:48:58 +02:00
parent 4b590e2102
commit 08134552a4
6 changed files with 17 additions and 8 deletions

View File

@@ -196,6 +196,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
 	cronGas = 0
 	partDone = metrics.Timer(ctx, metrics.VMApplyMessages)

+	// TODO reorg the code to minimize the execution critical section
 	vmi, err := makeVm(pstate, epoch, ts.MinTimestamp())
 	if err != nil {
 		return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)

View File

@@ -108,6 +108,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
 	if err != nil {
 		return cid.Undef, fmt.Errorf("creating vm: %w", err)
 	}
+	// Note: genesisVm is mutated, so this has to happen in a deferred func; go horror show.
 	defer func() { genesisVm.Done() }()

 	if len(miners) == 0 {
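
The closure here matters because of Go's defer semantics: writing defer genesisVm.Done() would bind the receiver at the point the defer statement executes, while the variable is reassigned further down in the function, so the call has to be wrapped in a func() that reads the variable at return time. A standalone sketch of the difference (toy vmHandle type, not lotus code):

// Standalone illustration of why Done is wrapped in a closure above.
package main

import "fmt"

type vmHandle struct{ id int }

func (v *vmHandle) Done() { fmt.Println("released vm", v.id) }

func main() {
	v := &vmHandle{id: 1}
	defer v.Done()              // receiver evaluated now: always releases vm 1
	defer func() { v.Done() }() // receiver read at return time: releases vm 2
	v = &vmHandle{id: 2}        // later mutation, as with genesisVm/vmi in this commit
}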

View File

@@ -159,6 +159,7 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr
 	if err != nil {
 		return nil, xerrors.Errorf("failed to set up vm: %w", err)
 	}
+	// Note: vmi is mutated, so this has to happen in a deferred func; go horror show.
 	defer func() { vmi.Done() }()

 	for i, m := range priorMsgs {

View File

@@ -17,8 +17,14 @@ import (
 )

 const (
+	// DefaultAvailableExecutionLanes is the number of available execution lanes; it is the bound of
+	// concurrent active executions.
+	// This is the default value in filecoin-ffi
 	DefaultAvailableExecutionLanes = 4
-	DefaultPriorityExecutionLanes  = 2
+	// DefaultPriorityExecutionLanes is the number of reserved execution lanes for priority computations.
+	// This is purely userspace, but we believe it is a reasonable default, even with more available
+	// lanes.
+	DefaultPriorityExecutionLanes = 2
 )

 var ErrExecutorDone = errors.New("executor has been released")
@@ -118,7 +124,7 @@ func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
 		e.available--
-		metricsUp(metrics.VMExecutionActive, lane)
+		metricsUp(metrics.VMExecutionRunning, lane)
 		return &executionToken{lane: lane, reserved: 0}

 	case ExecutionLanePriority:
@@ -134,7 +140,7 @@ func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
 			reserving = 1
 		}
-		metricsUp(metrics.VMExecutionActive, lane)
+		metricsUp(metrics.VMExecutionRunning, lane)
 		return &executionToken{lane: lane, reserved: reserving}

 	default:
@@ -152,7 +158,7 @@ func (e *executionEnv) putToken(token *executionToken) {
 	e.cond.Broadcast()
-	metricsDown(metrics.VMExecutionActive, token.lane)
+	metricsDown(metrics.VMExecutionRunning, token.lane)
 }

 func metricsUp(metric *stats.Int64Measure, lane ExecutionLane) {
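
Taken together, the hunks in this file gate VM executions behind a fixed pool of lanes, with a reserved quota that only priority work may use. A simplified, self-contained sketch of that pattern follows; laneGate, Acquire, and Release are illustrative names, not the lotus executionEnv API, and the real implementation also tracks how much of the reserve a priority token consumed so it can be returned.

// Sketch of lane-gated execution: a fixed slot pool with a priority-only reserve.
package lanes

import "sync"

type lane int

const (
	laneDefault lane = iota
	lanePriority
)

type laneGate struct {
	mx        sync.Mutex
	cond      *sync.Cond
	available int // free slots, e.g. DefaultAvailableExecutionLanes = 4
	reserved  int // slots only priority work may take, e.g. DefaultPriorityExecutionLanes = 2
}

func newLaneGate(available, reserved int) *laneGate {
	g := &laneGate{available: available, reserved: reserved}
	g.cond = sync.NewCond(&g.mx)
	return g
}

// Acquire blocks until a slot for the given lane is free.
func (g *laneGate) Acquire(l lane) {
	g.mx.Lock()
	defer g.mx.Unlock()
	for {
		switch l {
		case lanePriority:
			if g.available > 0 { // priority may use any slot, including the reserve
				g.available--
				return
			}
		default:
			if g.available > g.reserved { // default work must leave the reserve untouched
				g.available--
				return
			}
		}
		g.cond.Wait()
	}
}

// Release returns a slot and wakes waiters.
func (g *laneGate) Release() {
	g.mx.Lock()
	g.available++
	g.mx.Unlock()
	g.cond.Broadcast()
}

With the defaults above (4 available, 2 reserved), at most two default-lane executions run concurrently, while priority executions may occupy all four slots.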

View File

@@ -37,7 +37,7 @@ type Interface interface {
 	Flush(ctx context.Context) (cid.Cid, error)
 }

-// Executor is the general vm execution interface, which is prioritized according to execution langes.
+// Executor is the general vm execution interface, which is prioritized according to execution lanes.
 // User must call Done when it is done with this executor to release resource holds by the execution
 // environment
 type Executor interface {
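
The contract is simply acquire-then-release. A hedged sketch of the intended call shape (the full Executor method set is not shown in this hunk, so a minimal stand-in interface is used; only the defer-Done pattern is the point):

package vmexec

import "context"

// releaser is a stand-in for the Done part of the Executor contract above.
type releaser interface {
	Done() // releases the execution environment's hold (lane/token)
}

// withExecutor acquires an executor-like resource, guarantees its release,
// and runs the supplied work against it.
func withExecutor(ctx context.Context, acquire func(context.Context) (releaser, error), work func(releaser) error) error {
	exec, err := acquire(ctx)
	if err != nil {
		return err
	}
	defer exec.Done() // release even if work fails
	return work(exec)
}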

View File

@@ -125,7 +125,7 @@ var (
 	VMSends            = stats.Int64("vm/sends", "Counter for sends processed by the VM", stats.UnitDimensionless)
 	VMApplied          = stats.Int64("vm/applied", "Counter for messages (including internal messages) processed by the VM", stats.UnitDimensionless)
 	VMExecutionWaiting = stats.Int64("vm/execution_waiting", "Counter for VM executions waiting to be assigned to a lane", stats.UnitDimensionless)
-	VMExecutionActive  = stats.Int64("vm/execution_default", "Counter for active VM executions", stats.UnitDimensionless)
+	VMExecutionRunning = stats.Int64("vm/execution_running", "Counter for running VM executions", stats.UnitDimensionless)

 	// miner
 	WorkerCallsStarted = stats.Int64("sealing/worker_calls_started", "Counter of started worker tasks", stats.UnitDimensionless)
@@ -373,8 +373,8 @@ var (
 		Aggregation: view.LastValue(),
 		TagKeys:     []tag.Key{ExecutionLane},
 	}
-	VMExecutionActiveView = &view.View{
-		Measure:     VMExecutionActive,
+	VMExecutionRunningView = &view.View{
+		Measure:     VMExecutionRunning,
 		Aggregation: view.LastValue(),
 		TagKeys:     []tag.Key{ExecutionLane},
 	}
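
For reference, the renamed measure and view follow the usual OpenCensus wiring: an Int64 measure tagged by execution lane, registered through a view, and recorded when a token is handed out or returned. A hedged sketch of that wiring; the tag key name and the recorded value here are assumptions, not lotus's exact code.

// Sketch of registering and recording a lane-tagged OpenCensus measure
// like VMExecutionRunning.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	executionLaneKey, _ = tag.NewKey("execution_lane") // assumed key name

	vmExecutionRunning = stats.Int64("vm/execution_running",
		"Counter for running VM executions", stats.UnitDimensionless)

	vmExecutionRunningView = &view.View{
		Measure:     vmExecutionRunning,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{executionLaneKey},
	}
)

func main() {
	if err := view.Register(vmExecutionRunningView); err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	// Record an increment for the priority lane, in the spirit of metricsUp
	// in the executor hunk above.
	_ = stats.RecordWithTags(ctx,
		[]tag.Mutator{tag.Upsert(executionLaneKey, "priority")},
		vmExecutionRunning.M(1))
}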