lotus/chain/vm/execution.go

package vm

import (
	"context"
	"errors"
	"os"
	"strconv"
	"sync"

	"github.com/ipfs/go-cid"
	"go.opencensus.io/stats"
	"go.opencensus.io/tag"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/metrics"
)

const (
	// DefaultAvailableExecutionLanes is the number of available execution lanes; it is the bound of
	// concurrent active executions.
	// This is the default value in filecoin-ffi
	DefaultAvailableExecutionLanes = 4

	// DefaultPriorityExecutionLanes is the number of reserved execution lanes for priority computations.
	// This is purely userspace, but we believe it is a reasonable default, even with more available
	// lanes.
	DefaultPriorityExecutionLanes = 2
)

var ErrExecutorDone = errors.New("executor has been released")

// the execution environment; see below for definition, methods, and initialization
var execution *executionEnv

// implementation of the vm executor with a simple sanity check preventing use after free
type vmExecutor struct {
	lk    sync.RWMutex
	vmi   Interface
	token *executionToken
	done  bool
}

var _ Executor = (*vmExecutor)(nil)
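
// newVMExecutor wraps a vm Interface together with an execution token. The token is
// presumably obtained from execution.getToken at the interface boundary in NewVM (see the
// note in getToken below); callers must call Done on the executor when finished so the
// slot is returned to the execution environment.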
func newVMExecutor(vmi Interface, token *executionToken) Executor {
	return &vmExecutor{vmi: vmi, token: token}
}

func (e *vmExecutor) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
	e.lk.RLock()
	defer e.lk.RUnlock()

	if e.done {
		return nil, ErrExecutorDone
	}

	return e.vmi.ApplyMessage(ctx, cmsg)
}

func (e *vmExecutor) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
	e.lk.RLock()
	defer e.lk.RUnlock()

	if e.done {
		return nil, ErrExecutorDone
	}

	return e.vmi.ApplyImplicitMessage(ctx, msg)
}

func (e *vmExecutor) Flush(ctx context.Context) (cid.Cid, error) {
	e.lk.RLock()
	defer e.lk.RUnlock()

	if e.done {
		return cid.Undef, ErrExecutorDone
	}

	return e.vmi.Flush(ctx)
}

func (e *vmExecutor) Done() {
	e.lk.Lock()
	defer e.lk.Unlock()

	if !e.done {
		e.token.Done()
		e.token = nil
		e.done = true
	}
}
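
// executionToken represents a held execution slot. It records the lane it was issued for and
// whether it consumed one of the reserved (priority) slots, so that putToken can restore the
// reservation when the token is returned.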
type executionToken struct {
	lane     ExecutionLane
	reserved int
}

func (token *executionToken) Done() {
	execution.putToken(token)
}

type executionEnv struct {
	mx   *sync.Mutex
	cond *sync.Cond

	// available executors
	available int
	// reserved executors
	reserved int
}
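
// getToken blocks until an execution slot is free for the given lane and claims it. The
// default lane never dips below the reserved count, while the priority lane may use any
// slot, consuming a reservation first when one is left. For example, with the defaults
// (4 available, 2 reserved): at most 2 default-lane executions can run concurrently, while
// priority-lane executions can use all 4 slots; a priority token that consumed a reservation
// records reserved=1 so putToken can restore it.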
func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
	metricsUp(metrics.VMExecutionWaiting, lane)
	defer metricsDown(metrics.VMExecutionWaiting, lane)

	e.mx.Lock()
	defer e.mx.Unlock()

	switch lane {
	case ExecutionLaneDefault:
		for e.available <= e.reserved {
			e.cond.Wait()
		}
		e.available--

		metricsUp(metrics.VMExecutionRunning, lane)
		return &executionToken{lane: lane, reserved: 0}

	case ExecutionLanePriority:
		for e.available == 0 {
			e.cond.Wait()
		}
		e.available--

		reserving := 0
		if e.reserved > 0 {
			e.reserved--
			reserving = 1
		}

		metricsUp(metrics.VMExecutionRunning, lane)
		return &executionToken{lane: lane, reserved: reserving}

	default:
		// already checked at interface boundary in NewVM, so this is appropriate
		panic("bogus execution lane")
	}
}
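
// putToken returns an execution slot to the pool, restoring any reservation the token
// consumed, and wakes all waiters; Broadcast is used because both lanes may be blocked on
// the same condition variable.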
func (e *executionEnv) putToken(token *executionToken) {
	e.mx.Lock()
	defer e.mx.Unlock()

	e.available++
	e.reserved += token.reserved
	e.cond.Broadcast()

	metricsDown(metrics.VMExecutionRunning, token.lane)
}

func metricsUp(metric *stats.Int64Measure, lane ExecutionLane) {
	metricsAdjust(metric, lane, 1)
}

func metricsDown(metric *stats.Int64Measure, lane ExecutionLane) {
	metricsAdjust(metric, lane, -1)
}

func metricsAdjust(metric *stats.Int64Measure, lane ExecutionLane, delta int) {
	laneName := "default"
	if lane > ExecutionLaneDefault {
		laneName = "priority"
	}

	ctx, _ := tag.New(
		context.Background(),
		tag.Upsert(metrics.ExecutionLane, laneName),
	)
	stats.Record(ctx, metric.M(int64(delta)))
}
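
// init sizes the execution environment from the LOTUS_FVM_CONCURRENCY and
// LOTUS_FVM_CONCURRENCY_RESERVED environment variables, falling back to the defaults above.
// For example, LOTUS_FVM_CONCURRENCY=8 LOTUS_FVM_CONCURRENCY_RESERVED=3 would allow 8
// concurrent executions with 3 slots reserved for the priority lane.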
func init() {
	var available, priority int
	var err error

	concurrency := os.Getenv("LOTUS_FVM_CONCURRENCY")
	if concurrency == "" {
		available = DefaultAvailableExecutionLanes
	} else {
		available, err = strconv.Atoi(concurrency)
		if err != nil {
			panic(err)
		}
	}

	reserved := os.Getenv("LOTUS_FVM_CONCURRENCY_RESERVED")
	if reserved == "" {
		priority = DefaultPriorityExecutionLanes
	} else {
		priority, err = strconv.Atoi(reserved)
		if err != nil {
			panic(err)
		}
	}

	// some sanity checks
	if available < 2 {
		panic("insufficient execution concurrency")
	}
	if priority > available-1 {
		panic("insufficient default execution concurrency")
	}

	mx := &sync.Mutex{}
	cond := sync.NewCond(mx)

	execution = &executionEnv{
		mx:        mx,
		cond:      cond,
		available: available,
		reserved:  priority,
	}
}