forked from cerc-io/plugeth
cmd, core, eth/tracers: support fancier js tracing (#15516)
* cmd, core, eth/tracers: support fancier js tracing
* eth, internal/web3ext: rework trace API, concurrency, chain tracing
* eth/tracers: add three more JavaScript tracers
* eth/tracers, vendor: swap ottovm to duktape for tracing
* core, eth, internal: finalize call tracer and needed extras
* eth, tests: prestate tracer, call test suite, rewinding
* vendor: fix windows builds for tracer js engine
* vendor: temporary duktape fix
* eth/tracers: fix up 4byte and evmdis tracer
* vendor: pull in latest duktape with my upstream fixes
* eth: fix some review comments
* eth: rename rewind to reexec to make it more obvious
* core/vm: terminate tracing using defers
Parent: 1a5425779b
Commit: 5258785c81
@@ -19,6 +19,7 @@ package main
import (
    "encoding/json"
    "io"
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/common"
@@ -35,6 +36,10 @@ func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
    return &JSONLogger{json.NewEncoder(writer), cfg}
}

func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
    return nil
}

// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    log := vm.StructLog{
@@ -56,6 +61,11 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
    return l.encoder.Encode(log)
}

// CaptureFault outputs state information on the logger.
func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    return nil
}

// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
    type endLog struct {
@@ -19,6 +19,7 @@ package vm
import (
    "math/big"
    "sync/atomic"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
@@ -165,13 +166,23 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
    }
    evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value)

    // initialise a new contract and set the code that is to be used by the
    // E The contract is a scoped environment for this execution context
    // only.
    // Initialise a new contract and set the code that is to be used by the EVM.
    // The contract is a scoped environment for this execution context only.
    contract := NewContract(caller, to, value, gas)
    contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))

    start := time.Now()

    // Capture the tracer start/end events in debug mode
    if evm.vmConfig.Debug && evm.depth == 0 {
        evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value)

        defer func() { // Lazy evaluation of the parameters
            evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
        }()
    }
    ret, err = run(evm, contract, input)

    // When an error was returned by the EVM or when setting the creation code
    // above we revert to the snapshot and consume any gas remaining. Additionally
    // when we're in homestead this also counts for code storage gas errors.
@@ -338,7 +349,14 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
    if evm.vmConfig.NoRecursion && evm.depth > 0 {
        return nil, contractAddr, gas, nil
    }

    if evm.vmConfig.Debug && evm.depth == 0 {
        evm.vmConfig.Tracer.CaptureStart(caller.Address(), contractAddr, true, code, gas, value)
    }
    start := time.Now()

    ret, err = run(evm, contract, nil)

    // check whether the max code size has been exceeded
    maxCodeSizeExceeded := evm.ChainConfig().IsEIP158(evm.BlockNumber) && len(ret) > params.MaxCodeSize
    // if the contract creation ran successfully and no errors were returned
@@ -367,6 +385,9 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
    if maxCodeSizeExceeded && err == nil {
        err = errMaxCodeSizeExceeded
    }
    if evm.vmConfig.Debug && evm.depth == 0 {
        evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
    }
    return ret, contractAddr, contract.Gas, err
}
@@ -144,12 +144,17 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
    )
    contract.Input = input

    defer func() {
        if err != nil && !logged && in.cfg.Debug {
            in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err)
        }
    }()

    if in.cfg.Debug {
        defer func() {
            if err != nil {
                if !logged {
                    in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err)
                } else {
                    in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err)
                }
            }
        }()
    }
    // The Interpreter main run loop (contextual). This loop runs until either an
    // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
    // the execution of one of the operations or until the done flag is set by the
@@ -84,7 +84,9 @@ func (s *StructLog) OpName() string {
// Note that reference types are actual VM data structures; make copies
// if you need to retain them beyond the current call.
type Tracer interface {
    CaptureStart(from common.Address, to common.Address, call bool, input []byte, gas uint64, value *big.Int) error
    CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error
    CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error
    CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error
}
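To make the Tracer contract above concrete, here is a minimal sketch of a custom implementation (hypothetical, not part of this commit) that merely tallies executed opcodes. As the comment above warns, a tracer that wanted to keep any Memory, Stack or Contract data past the current call would have to copy it first; this one retains nothing, so no copies are needed.

package tracerexample

import (
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/vm"
)

// opCountTracer is a hypothetical vm.Tracer that only counts opcode names.
type opCountTracer struct {
    counts map[string]uint64
}

func newOpCountTracer() *opCountTracer {
    return &opCountTracer{counts: make(map[string]uint64)}
}

func (tr *opCountTracer) CaptureStart(from common.Address, to common.Address, call bool, input []byte, gas uint64, value *big.Int) error {
    return nil
}

func (tr *opCountTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    tr.counts[op.String()]++ // only a value derived from op is stored, no VM references are kept
    return nil
}

func (tr *opCountTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
    return nil
}

func (tr *opCountTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
    return nil
}

Such a tracer would be hooked in the same way the loggers in this commit are, via vm.Config{Debug: true, Tracer: newOpCountTracer()}.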
@@ -111,6 +113,10 @@ func NewStructLogger(cfg *LogConfig) *StructLogger {
    return logger
}

func (l *StructLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
    return nil
}

// CaptureState logs a new structured log message and pushes it out to the environment
//
// CaptureState also tracks SSTORE ops to track dirty values.
@@ -161,6 +167,10 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
    return nil
}

func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
    return nil
}

func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
    fmt.Printf("0x%x", output)
    if err != nil {
eth/api.go (251 changed lines)
@@ -17,24 +17,19 @@
package eth

import (
    "bytes"
    "compress/gzip"
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "math/big"
    "os"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/internal/ethapi"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/miner"
    "github.com/ethereum/go-ethereum/params"
@@ -43,8 +38,6 @@ import (
    "github.com/ethereum/go-ethereum/trie"
)

const defaultTraceTimeout = 5 * time.Second

// PublicEthereumAPI provides an API to access Ethereum full node-related
// information.
type PublicEthereumAPI struct {
@@ -348,248 +341,6 @@ func NewPrivateDebugAPI(config *params.ChainConfig, eth *Ethereum) *PrivateDebug
    return &PrivateDebugAPI{config: config, eth: eth}
}

// BlockTraceResult is the returned value when replaying a block to check for
// consensus results and full VM trace logs for all included transactions.
type BlockTraceResult struct {
    Validated  bool                  `json:"validated"`
    StructLogs []ethapi.StructLogRes `json:"structLogs"`
    Error      string                `json:"error"`
}

// TraceArgs holds extra parameters to trace functions
type TraceArgs struct {
    *vm.LogConfig
    Tracer  *string
    Timeout *string
}

// TraceBlock processes the given block'api RLP but does not import the block in to
// the chain.
func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) BlockTraceResult {
    var block types.Block
    err := rlp.Decode(bytes.NewReader(blockRlp), &block)
    if err != nil {
        return BlockTraceResult{Error: fmt.Sprintf("could not decode block: %v", err)}
    }

    validated, logs, err := api.traceBlock(&block, config)
    return BlockTraceResult{
        Validated:  validated,
        StructLogs: ethapi.FormatLogs(logs),
        Error:      formatError(err),
    }
}

// TraceBlockFromFile loads the block'api RLP from the given file name and attempts to
// process it but does not import the block in to the chain.
func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.LogConfig) BlockTraceResult {
    blockRlp, err := ioutil.ReadFile(file)
    if err != nil {
        return BlockTraceResult{Error: fmt.Sprintf("could not read file: %v", err)}
    }
    return api.TraceBlock(blockRlp, config)
}

// TraceBlockByNumber processes the block by canonical block number.
func (api *PrivateDebugAPI) TraceBlockByNumber(blockNr rpc.BlockNumber, config *vm.LogConfig) BlockTraceResult {
    // Fetch the block that we aim to reprocess
    var block *types.Block
    switch blockNr {
    case rpc.PendingBlockNumber:
        // Pending block is only known by the miner
        block = api.eth.miner.PendingBlock()
    case rpc.LatestBlockNumber:
        block = api.eth.blockchain.CurrentBlock()
    default:
        block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
    }

    if block == nil {
        return BlockTraceResult{Error: fmt.Sprintf("block #%d not found", blockNr)}
    }

    validated, logs, err := api.traceBlock(block, config)
    return BlockTraceResult{
        Validated:  validated,
        StructLogs: ethapi.FormatLogs(logs),
        Error:      formatError(err),
    }
}

// TraceBlockByHash processes the block by hash.
func (api *PrivateDebugAPI) TraceBlockByHash(hash common.Hash, config *vm.LogConfig) BlockTraceResult {
    // Fetch the block that we aim to reprocess
    block := api.eth.BlockChain().GetBlockByHash(hash)
    if block == nil {
        return BlockTraceResult{Error: fmt.Sprintf("block #%x not found", hash)}
    }

    validated, logs, err := api.traceBlock(block, config)
    return BlockTraceResult{
        Validated:  validated,
        StructLogs: ethapi.FormatLogs(logs),
        Error:      formatError(err),
    }
}

// traceBlock processes the given block but does not save the state.
func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConfig) (bool, []vm.StructLog, error) {
    // Validate and reprocess the block
    var (
        blockchain = api.eth.BlockChain()
        validator  = blockchain.Validator()
        processor  = blockchain.Processor()
    )

    structLogger := vm.NewStructLogger(logConfig)

    config := vm.Config{
        Debug:  true,
        Tracer: structLogger,
    }
    if err := api.eth.engine.VerifyHeader(blockchain, block.Header(), true); err != nil {
        return false, structLogger.StructLogs(), err
    }
    statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
    if err != nil {
        switch err.(type) {
        case *trie.MissingNodeError:
            return false, structLogger.StructLogs(), fmt.Errorf("required historical state unavailable")
        default:
            return false, structLogger.StructLogs(), err
        }
    }

    receipts, _, usedGas, err := processor.Process(block, statedb, config)
    if err != nil {
        return false, structLogger.StructLogs(), err
    }
    if err := validator.ValidateState(block, blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1), statedb, receipts, usedGas); err != nil {
        return false, structLogger.StructLogs(), err
    }
    return true, structLogger.StructLogs(), nil
}

// formatError formats a Go error into either an empty string or the data content
// of the error itself.
func formatError(err error) string {
    if err == nil {
        return ""
    }
    return err.Error()
}

type timeoutError struct{}

func (t *timeoutError) Error() string {
    return "Execution time exceeded"
}
// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.Hash, config *TraceArgs) (interface{}, error) {
    var tracer vm.Tracer
    if config != nil && config.Tracer != nil {
        timeout := defaultTraceTimeout
        if config.Timeout != nil {
            var err error
            if timeout, err = time.ParseDuration(*config.Timeout); err != nil {
                return nil, err
            }
        }

        var err error
        if tracer, err = ethapi.NewJavascriptTracer(*config.Tracer); err != nil {
            return nil, err
        }

        // Handle timeouts and RPC cancellations
        deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
        go func() {
            <-deadlineCtx.Done()
            tracer.(*ethapi.JavascriptTracer).Stop(&timeoutError{})
        }()
        defer cancel()
    } else if config == nil {
        tracer = vm.NewStructLogger(nil)
    } else {
        tracer = vm.NewStructLogger(config.LogConfig)
    }

    // Retrieve the tx from the chain and the containing block
    tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
    if tx == nil {
        return nil, fmt.Errorf("transaction %x not found", txHash)
    }
    msg, context, statedb, err := api.computeTxEnv(blockHash, int(txIndex))
    if err != nil {
        switch err.(type) {
        case *trie.MissingNodeError:
            return nil, fmt.Errorf("required historical state unavailable")
        default:
            return nil, err
        }
    }

    // Run the transaction with tracing enabled.
    vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{Debug: true, Tracer: tracer})
    ret, gas, failed, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
    if err != nil {
        return nil, fmt.Errorf("tracing failed: %v", err)
    }
    switch tracer := tracer.(type) {
    case *vm.StructLogger:
        return &ethapi.ExecutionResult{
            Gas:         gas,
            Failed:      failed,
            ReturnValue: fmt.Sprintf("%x", ret),
            StructLogs:  ethapi.FormatLogs(tracer.StructLogs()),
        }, nil
    case *ethapi.JavascriptTracer:
        return tracer.GetResult()
    default:
        panic(fmt.Sprintf("bad tracer type %T", tracer))
    }
}

// computeTxEnv returns the execution environment of a certain transaction.
func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int) (core.Message, vm.Context, *state.StateDB, error) {
    // Create the parent state.
    block := api.eth.BlockChain().GetBlockByHash(blockHash)
    if block == nil {
        return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
    }
    parent := api.eth.BlockChain().GetBlock(block.ParentHash(), block.NumberU64()-1)
    if parent == nil {
        return nil, vm.Context{}, nil, fmt.Errorf("block parent %x not found", block.ParentHash())
    }
    statedb, err := api.eth.BlockChain().StateAt(parent.Root())
    if err != nil {
        return nil, vm.Context{}, nil, err
    }
    txs := block.Transactions()

    // Recompute transactions up to the target index.
    signer := types.MakeSigner(api.config, block.Number())
    for idx, tx := range txs {
        // Assemble the transaction call message
        msg, _ := tx.AsMessage(signer)
        context := core.NewEVMContext(msg, block.Header(), api.eth.BlockChain(), nil)
        if idx == txIndex {
            return msg, context, statedb, nil
        }

        vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
        gp := new(core.GasPool).AddGas(tx.Gas())
        _, _, _, err := core.ApplyMessage(vmenv, msg, gp)
        if err != nil {
            return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
        }
        statedb.DeleteSuicides()
    }
    return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
}

// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
func (api *PrivateDebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
    db := core.PreimageTable(api.eth.ChainDb())
@@ -617,7 +368,7 @@ type storageEntry struct {

// StorageRangeAt returns the storage at the given block height and transaction index.
func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {
    _, _, statedb, err := api.computeTxEnv(blockHash, txIndex)
    _, _, statedb, err := api.computeTxEnv(blockHash, txIndex, 0)
    if err != nil {
        return StorageRangeResult{}, err
    }
eth/api_tracer.go (new file, 727 lines)
@@ -0,0 +1,727 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io/ioutil"
    "runtime"
    "sync"
    "sync/atomic"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/eth/tracers"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/internal/ethapi"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/trie"
)

const (
    // defaultTraceTimeout is the amount of time a single transaction can execute
    // by default before being forcefully aborted.
    defaultTraceTimeout = 5 * time.Second

    // defaultTraceReexec is the number of blocks the tracer is willing to go back
    // and reexecute to produce missing historical state necessary to run a specific
    // trace.
    defaultTraceReexec = uint64(128)
)
// TraceConfig holds extra parameters to trace functions.
type TraceConfig struct {
    *vm.LogConfig
    Tracer  *string
    Timeout *string
    Reexec  *uint64
}

// txTraceResult is the result of a single transaction trace.
type txTraceResult struct {
    Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
    Error  string      `json:"error,omitempty"`  // Trace failure produced by the tracer
}

// blockTraceTask represents a single block trace task when an entire chain is
// being traced.
type blockTraceTask struct {
    statedb *state.StateDB   // Intermediate state prepped for tracing
    block   *types.Block     // Block to trace the transactions from
    results []*txTraceResult // Trace results produced by the task
}

// blockTraceResult represents the results of tracing a single block when an entire
// chain is being traced.
type blockTraceResult struct {
    Block  hexutil.Uint64   `json:"block"`  // Block number corresponding to this trace
    Hash   common.Hash      `json:"hash"`   // Block hash corresponding to this trace
    Traces []*txTraceResult `json:"traces"` // Trace results produced by the task
}

// txTraceTask represents a single transaction trace task when an entire block
// is being traced.
type txTraceTask struct {
    statedb *state.StateDB // Intermediate state prepped for tracing
    index   int            // Transaction offset in the block
}
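As a usage illustration (a sketch, not code from this commit), a caller inside the eth package might fill in TraceConfig like this; the tracer name, timeout and reexec depth are made-up example values:

// exampleTraceConfig shows how the three optional knobs relate to the
// defaults defined above (hypothetical helper, not part of the commit).
func exampleTraceConfig() *TraceConfig {
    tracer := "callTracer" // name of a bundled JavaScript tracer, e.g. the call tracer added below
    timeout := "10s"       // parsed with time.ParseDuration in traceTx; default is defaultTraceTimeout
    reexec := uint64(1024) // allow regenerating state up to 1024 blocks back; default is defaultTraceReexec

    return &TraceConfig{
        Tracer:  &tracer,
        Timeout: &timeout,
        Reexec:  &reexec,
    }
}

Leaving Tracer nil falls back to the structured logger, and leaving Reexec nil uses defaultTraceReexec (128 blocks), as traceTx and computeStateDB below show.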
// ephemeralDatabase is a memory wrapper around a proper database, which acts as
// an ephemeral write layer. This construct is used by the chain tracer to write
// state tries for intermediate blocks without serializing to disk, but at the
// same time to allow disk fallback for reads that do not hit the memory layer.
type ephemeralDatabase struct {
    diskdb ethdb.Database     // Persistent disk database to fall back to with reads
    memdb  *ethdb.MemDatabase // Ephemeral memory database for primary reads and writes
}

func (db *ephemeralDatabase) Put(key []byte, value []byte) error { return db.memdb.Put(key, value) }
func (db *ephemeralDatabase) Delete(key []byte) error            { return errors.New("delete not supported") }
func (db *ephemeralDatabase) Close()                             { db.memdb.Close() }
func (db *ephemeralDatabase) NewBatch() ethdb.Batch {
    return db.memdb.NewBatch()
}
func (db *ephemeralDatabase) Has(key []byte) (bool, error) {
    if has, _ := db.memdb.Has(key); has {
        return has, nil
    }
    return db.diskdb.Has(key)
}
func (db *ephemeralDatabase) Get(key []byte) ([]byte, error) {
    if blob, _ := db.memdb.Get(key); blob != nil {
        return blob, nil
    }
    return db.diskdb.Get(key)
}

// Prune does a state sync into a new memory write layer and replaces the old one.
// This allows us to discard entries that are no longer referenced from the current
// state.
func (db *ephemeralDatabase) Prune(root common.Hash) {
    // Pull the still relevant state data into memory
    sync := state.NewStateSync(root, db.diskdb)
    for sync.Pending() > 0 {
        hash := sync.Missing(1)[0]

        // Move the next trie node from the memory layer into a sync struct
        node, err := db.memdb.Get(hash[:])
        if err != nil {
            panic(err) // memdb must have the data
        }
        if _, _, err := sync.Process([]trie.SyncResult{{Hash: hash, Data: node}}); err != nil {
            panic(err) // it's not possible to fail processing a node
        }
    }
    // Discard the old memory layer and write a new one
    db.memdb, _ = ethdb.NewMemDatabaseWithCap(db.memdb.Len())
    if _, err := sync.Commit(db); err != nil {
        panic(err) // writing into a memdb cannot fail
    }
}
// TraceChain returns the structured logs created during the execution of EVM
// between two blocks (excluding start) and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) {
    // Fetch the block interval that we want to trace
    var from, to *types.Block

    switch start {
    case rpc.PendingBlockNumber:
        from = api.eth.miner.PendingBlock()
    case rpc.LatestBlockNumber:
        from = api.eth.blockchain.CurrentBlock()
    default:
        from = api.eth.blockchain.GetBlockByNumber(uint64(start))
    }
    switch end {
    case rpc.PendingBlockNumber:
        to = api.eth.miner.PendingBlock()
    case rpc.LatestBlockNumber:
        to = api.eth.blockchain.CurrentBlock()
    default:
        to = api.eth.blockchain.GetBlockByNumber(uint64(end))
    }
    // Trace the chain if we've found all our blocks
    if from == nil {
        return nil, fmt.Errorf("starting block #%d not found", start)
    }
    if to == nil {
        return nil, fmt.Errorf("end block #%d not found", end)
    }
    return api.traceChain(ctx, from, to, config)
}
// traceChain configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requested tracer.
func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {
    // Tracing a chain is a **long** operation, only do with subscriptions
    notifier, supported := rpc.NotifierFromContext(ctx)
    if !supported {
        return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
    }
    sub := notifier.CreateSubscription()

    // Ensure we have a valid starting state before doing any work
    origin := start.NumberU64()

    memdb, _ := ethdb.NewMemDatabase()
    db := &ephemeralDatabase{
        diskdb: api.eth.ChainDb(),
        memdb:  memdb,
    }
    if number := start.NumberU64(); number > 0 {
        start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1)
        if start == nil {
            return nil, fmt.Errorf("parent block #%d not found", number-1)
        }
    }
    statedb, err := state.New(start.Root(), state.NewDatabase(db))
    if err != nil {
        // If the starting state is missing, allow some number of blocks to be reexecuted
        reexec := defaultTraceReexec
        if config.Reexec != nil {
            reexec = *config.Reexec
        }
        // Find the most recent block that has the state available
        for i := uint64(0); i < reexec; i++ {
            start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1)
            if start == nil {
                break
            }
            if statedb, err = state.New(start.Root(), state.NewDatabase(db)); err == nil {
                break
            }
        }
        // If we still don't have the state available, bail out
        if err != nil {
            switch err.(type) {
            case *trie.MissingNodeError:
                return nil, errors.New("required historical state unavailable")
            default:
                return nil, err
            }
        }
    }
    // Execute all the transactions contained within the chain concurrently for each block
    blocks := int(end.NumberU64() - origin)

    threads := runtime.NumCPU()
    if threads > blocks {
        threads = blocks
    }
    var (
        pend    = new(sync.WaitGroup)
        tasks   = make(chan *blockTraceTask, threads)
        results = make(chan *blockTraceTask, threads)
    )
    for th := 0; th < threads; th++ {
        pend.Add(1)
        go func() {
            defer pend.Done()

            // Fetch and execute the next block trace tasks
            for task := range tasks {
                signer := types.MakeSigner(api.config, task.block.Number())

                // Trace all the transactions contained within
                for i, tx := range task.block.Transactions() {
                    msg, _ := tx.AsMessage(signer)
                    vmctx := core.NewEVMContext(msg, task.block.Header(), api.eth.blockchain, nil)

                    res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config)
                    if err != nil {
                        task.results[i] = &txTraceResult{Error: err.Error()}
                        log.Warn("Tracing failed", "err", err)
                        break
                    }
                    task.statedb.DeleteSuicides()
                    task.results[i] = &txTraceResult{Result: res}
                }
                // Stream the result back to the user or abort on teardown
                select {
                case results <- task:
                case <-notifier.Closed():
                    return
                }
            }
        }()
    }
    // Start a goroutine to feed all the blocks into the tracers
    begin := time.Now()
    complete := start.NumberU64()

    go func() {
        var (
            logged time.Time
            number uint64
            traced uint64
            failed error
        )
        // Ensure everything is properly cleaned up on any exit path
        defer func() {
            close(tasks)
            pend.Wait()

            switch {
            case failed != nil:
                log.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed)
            case number < end.NumberU64():
                log.Warn("Chain tracing aborted", "start", start.NumberU64(), "end", end.NumberU64(), "abort", number, "transactions", traced, "elapsed", time.Since(begin))
            default:
                log.Info("Chain tracing finished", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin))
            }
            close(results)
        }()
        // Feed all the blocks both into the tracer, as well as fast process concurrently
        for number = start.NumberU64() + 1; number <= end.NumberU64(); number++ {
            // Stop tracing if interruption was requested
            select {
            case <-notifier.Closed():
                return
            default:
            }
            // Print progress logs if long enough time elapsed
            if time.Since(logged) > 8*time.Second {
                if number > origin {
                    log.Info("Tracing chain segment", "start", origin, "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin))
                } else {
                    log.Info("Preparing state for chain trace", "block", number, "start", origin, "elapsed", time.Since(begin))
                }
                logged = time.Now()
            }
            // Retrieve the next block to trace
            block := api.eth.blockchain.GetBlockByNumber(number)
            if block == nil {
                failed = fmt.Errorf("block #%d not found", number)
                break
            }
            // Send the block over to the concurrent tracers (if not in the fast-forward phase)
            if number > origin {
                txs := block.Transactions()

                select {
                case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: block, results: make([]*txTraceResult, len(txs))}:
                case <-notifier.Closed():
                    return
                }
                traced += uint64(len(txs))
            } else {
                atomic.StoreUint64(&complete, number)
            }
            // Generate the next state snapshot fast without tracing
            _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
            if err != nil {
                failed = err
                break
            }
            // Finalize the state so any modifications are written to the trie
            root, err := statedb.CommitTo(db, true)
            if err != nil {
                failed = err
                break
            }
            if err := statedb.Reset(root); err != nil {
                failed = err
                break
            }
            // After every N blocks, prune the database to only retain relevant data
            if (number-start.NumberU64())%4096 == 0 {
                // Wait until currently pending trace jobs finish
                for atomic.LoadUint64(&complete) != number {
                    select {
                    case <-time.After(100 * time.Millisecond):
                    case <-notifier.Closed():
                        return
                    }
                }
                // No more concurrent access at this point, prune the database
                var (
                    nodes = db.memdb.Len()
                    start = time.Now()
                )
                db.Prune(root)
                log.Info("Pruned tracer state entries", "deleted", nodes-db.memdb.Len(), "left", db.memdb.Len(), "elapsed", time.Since(start))

                statedb, _ = state.New(root, state.NewDatabase(db))
            }
        }
    }()

    // Keep reading the trace results and stream them to the user
    go func() {
        var (
            done = make(map[uint64]*blockTraceResult)
            next = origin + 1
        )
        for res := range results {
            // Queue up next received result
            result := &blockTraceResult{
                Block:  hexutil.Uint64(res.block.NumberU64()),
                Hash:   res.block.Hash(),
                Traces: res.results,
            }
            done[uint64(result.Block)] = result

            // Stream completed traces to the user, aborting on the first error
            for result, ok := done[next]; ok; result, ok = done[next] {
                if len(result.Traces) > 0 || next == end.NumberU64() {
                    notifier.Notify(sub.ID, result)
                }
                atomic.StoreUint64(&complete, next)
                delete(done, next)
                next++
            }
        }
    }()
    return sub, nil
}
// TraceBlockByNumber returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) {
    // Fetch the block that we want to trace
    var block *types.Block

    switch number {
    case rpc.PendingBlockNumber:
        block = api.eth.miner.PendingBlock()
    case rpc.LatestBlockNumber:
        block = api.eth.blockchain.CurrentBlock()
    default:
        block = api.eth.blockchain.GetBlockByNumber(uint64(number))
    }
    // Trace the block if it was found
    if block == nil {
        return nil, fmt.Errorf("block #%d not found", number)
    }
    return api.traceBlock(ctx, block, config)
}

// TraceBlockByHash returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
    block := api.eth.blockchain.GetBlockByHash(hash)
    if block == nil {
        return nil, fmt.Errorf("block #%x not found", hash)
    }
    return api.traceBlock(ctx, block, config)
}

// TraceBlock returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {
    block := new(types.Block)
    if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
        return nil, fmt.Errorf("could not decode block: %v", err)
    }
    return api.traceBlock(ctx, block, config)
}

// TraceBlockFromFile returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) {
    blob, err := ioutil.ReadFile(file)
    if err != nil {
        return nil, fmt.Errorf("could not read file: %v", err)
    }
    return api.TraceBlock(ctx, blob, config)
}
// traceBlock configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requested tracer.
func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
    // Create the parent state database
    if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil {
        return nil, err
    }
    parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
    if parent == nil {
        return nil, fmt.Errorf("parent %x not found", block.ParentHash())
    }
    reexec := defaultTraceReexec
    if config.Reexec != nil {
        reexec = *config.Reexec
    }
    statedb, err := api.computeStateDB(parent, reexec)
    if err != nil {
        return nil, err
    }
    // Execute all the transactions contained within the block concurrently
    var (
        signer = types.MakeSigner(api.config, block.Number())

        txs     = block.Transactions()
        results = make([]*txTraceResult, len(txs))

        pend = new(sync.WaitGroup)
        jobs = make(chan *txTraceTask, len(txs))
    )
    threads := runtime.NumCPU()
    if threads > len(txs) {
        threads = len(txs)
    }
    for th := 0; th < threads; th++ {
        pend.Add(1)
        go func() {
            defer pend.Done()

            // Fetch and execute the next transaction trace tasks
            for task := range jobs {
                msg, _ := txs[task.index].AsMessage(signer)
                vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)

                res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config)
                if err != nil {
                    results[task.index] = &txTraceResult{Error: err.Error()}
                    continue
                }
                results[task.index] = &txTraceResult{Result: res}
            }
        }()
    }
    // Feed the transactions into the tracers and return
    var failed error
    for i, tx := range txs {
        // Send the trace task over for execution
        jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}

        // Generate the next state snapshot fast without tracing
        msg, _ := tx.AsMessage(signer)
        vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)

        vmenv := vm.NewEVM(vmctx, statedb, api.config, vm.Config{})
        if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
            failed = err
            break
        }
        // Finalize the state so any modifications are written to the trie
        statedb.Finalise(true)
    }
    close(jobs)
    pend.Wait()

    // If execution failed in between, abort
    if failed != nil {
        return nil, failed
    }
    return results, nil
}
// computeStateDB retrieves the state database associated with a certain block.
// If no state is locally available for the given block, a number of blocks are
// attempted to be reexecuted to generate the desired state.
func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*state.StateDB, error) {
    // If we have the state fully available, use that
    statedb, err := api.eth.blockchain.StateAt(block.Root())
    if err == nil {
        return statedb, nil
    }
    // Otherwise try to reexec blocks until we find a state or reach our limit
    origin := block.NumberU64()

    memdb, _ := ethdb.NewMemDatabase()
    db := &ephemeralDatabase{
        diskdb: api.eth.ChainDb(),
        memdb:  memdb,
    }
    for i := uint64(0); i < reexec; i++ {
        block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
        if block == nil {
            break
        }
        if statedb, err = state.New(block.Root(), state.NewDatabase(db)); err == nil {
            break
        }
    }
    if err != nil {
        switch err.(type) {
        case *trie.MissingNodeError:
            return nil, errors.New("required historical state unavailable")
        default:
            return nil, err
        }
    }
    // State was available at historical point, regenerate
    var (
        start  = time.Now()
        logged time.Time
    )
    for block.NumberU64() < origin {
        // Print progress logs if long enough time elapsed
        if time.Since(logged) > 8*time.Second {
            log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "elapsed", time.Since(start))
            logged = time.Now()
        }
        // Retrieve the next block to regenerate and process it
        if block = api.eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil {
            return nil, fmt.Errorf("block #%d not found", block.NumberU64()+1)
        }
        _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
        if err != nil {
            return nil, err
        }
        // Finalize the state so any modifications are written to the trie
        root, err := statedb.CommitTo(db, true)
        if err != nil {
            return nil, err
        }
        if err := statedb.Reset(root); err != nil {
            return nil, err
        }
        // After every N blocks, prune the database to only retain relevant data
        if block.NumberU64()%4096 == 0 || block.NumberU64() == origin {
            var (
                nodes = db.memdb.Len()
                begin = time.Now()
            )
            db.Prune(root)
            log.Info("Pruned tracer state entries", "deleted", nodes-db.memdb.Len(), "left", db.memdb.Len(), "elapsed", time.Since(begin))

            statedb, _ = state.New(root, state.NewDatabase(db))
        }
    }
    log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start))
    return statedb, nil
}
// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {
    // Retrieve the transaction and assemble its EVM context
    tx, blockHash, _, index := core.GetTransaction(api.eth.ChainDb(), hash)
    if tx == nil {
        return nil, fmt.Errorf("transaction %x not found", hash)
    }
    reexec := defaultTraceReexec
    if config.Reexec != nil {
        reexec = *config.Reexec
    }
    msg, vmctx, statedb, err := api.computeTxEnv(blockHash, int(index), reexec)
    if err != nil {
        return nil, err
    }
    // Trace the transaction and return
    return api.traceTx(ctx, msg, vmctx, statedb, config)
}
// traceTx configures a new tracer according to the provided configuration, and
// executes the given message in the provided environment. The return value will
// be tracer dependent.
func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, vmctx vm.Context, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
    // Assemble the structured logger or the JavaScript tracer
    var (
        tracer vm.Tracer
        err    error
    )
    switch {
    case config != nil && config.Tracer != nil:
        // Define a meaningful timeout of a single transaction trace
        timeout := defaultTraceTimeout
        if config.Timeout != nil {
            if timeout, err = time.ParseDuration(*config.Timeout); err != nil {
                return nil, err
            }
        }
        // Construct the JavaScript tracer to execute with
        if tracer, err = tracers.New(*config.Tracer); err != nil {
            return nil, err
        }
        // Handle timeouts and RPC cancellations
        deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
        go func() {
            <-deadlineCtx.Done()
            tracer.(*tracers.Tracer).Stop(errors.New("execution timeout"))
        }()
        defer cancel()

    case config == nil:
        tracer = vm.NewStructLogger(nil)

    default:
        tracer = vm.NewStructLogger(config.LogConfig)
    }
    // Run the transaction with tracing enabled.
    vmenv := vm.NewEVM(vmctx, statedb, api.config, vm.Config{Debug: true, Tracer: tracer})

    ret, gas, failed, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
    if err != nil {
        return nil, fmt.Errorf("tracing failed: %v", err)
    }
    // Depending on the tracer type, format and return the output
    switch tracer := tracer.(type) {
    case *vm.StructLogger:
        return &ethapi.ExecutionResult{
            Gas:         gas,
            Failed:      failed,
            ReturnValue: fmt.Sprintf("%x", ret),
            StructLogs:  ethapi.FormatLogs(tracer.StructLogs()),
        }, nil

    case *tracers.Tracer:
        return tracer.GetResult()

    default:
        panic(fmt.Sprintf("bad tracer type %T", tracer))
    }
}
// computeTxEnv returns the execution environment of a certain transaction.
func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, reexec uint64) (core.Message, vm.Context, *state.StateDB, error) {
    // Create the parent state database
    block := api.eth.blockchain.GetBlockByHash(blockHash)
    if block == nil {
        return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
    }
    parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
    if parent == nil {
        return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash())
    }
    statedb, err := api.computeStateDB(parent, reexec)
    if err != nil {
        return nil, vm.Context{}, nil, err
    }
    // Recompute transactions up to the target index.
    signer := types.MakeSigner(api.config, block.Number())

    for idx, tx := range block.Transactions() {
        // Assemble the transaction call message and return if the requested offset
        msg, _ := tx.AsMessage(signer)
        context := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
        if idx == txIndex {
            return msg, context, statedb, nil
        }
        // Not yet the searched for transaction, execute on top of the current state
        vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
        if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
            return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
        }
        statedb.DeleteSuicides()
    }
    return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
}
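To sketch how the per-transaction results of the block tracers above are consumed (a hypothetical example, not part of the commit; the tracer name is an assumption):

// exampleTraceLatestBlock is a sketch assuming it lives in package eth next to the API above.
func exampleTraceLatestBlock(ctx context.Context, api *PrivateDebugAPI) error {
    tracer := "callTracer" // any bundled JavaScript tracer name accepted by tracers.New
    results, err := api.TraceBlockByNumber(ctx, rpc.LatestBlockNumber, &TraceConfig{Tracer: &tracer})
    if err != nil {
        return err
    }
    for i, res := range results {
        if res.Error != "" {
            // Per-transaction failures are reported in-band, not as a global error
            log.Warn("Transaction trace failed", "index", i, "err", res.Error)
            continue
        }
        log.Info("Transaction traced", "index", i, "result", res.Result)
    }
    return nil
}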
@@ -747,10 +747,11 @@ func (self *ProtocolManager) txBroadcastLoop() {
// EthNodeInfo represents a short summary of the Ethereum sub-protocol metadata known
// about the host peer.
type EthNodeInfo struct {
    Network    uint64      `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3)
    Difficulty *big.Int    `json:"difficulty"` // Total difficulty of the host's blockchain
    Genesis    common.Hash `json:"genesis"`    // SHA3 hash of the host's genesis block
    Head       common.Hash `json:"head"`       // SHA3 hash of the host's best owned block
    Network    uint64              `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3)
    Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
    Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
    Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
    Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}

// NodeInfo retrieves some protocol metadata about the running host node.
@@ -760,6 +761,7 @@ func (self *ProtocolManager) NodeInfo() *EthNodeInfo {
        Network:    self.networkId,
        Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
        Genesis:    self.blockchain.Genesis().Hash(),
        Config:     self.blockchain.Config(),
        Head:       currentBlock.Hash(),
    }
}
eth/tracers/internal/tracers/4byte_tracer.js (new file, 86 lines)
@@ -0,0 +1,86 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// 4byteTracer searches for 4byte-identifiers, and collects them for post-processing.
// It collects the method identifiers along with the size of the supplied data, so
// a reversed signature can be matched against the size of the data.
//
// Example:
//   > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"})
//   {
//     0x27dc297e-128: 1,
//     0x38cc4831-0: 2,
//     0x524f3889-96: 1,
//     0xadf59f99-288: 1,
//     0xc281d19e-0: 1
//   }
{
    // ids aggregates the 4byte ids found.
    ids : {},

    // callType returns 'false' for non-calls, or the peek-index for the first param
    // after 'value', i.e. meminstart.
    callType: function(opstr){
        switch(opstr){
        case "CALL": case "CALLCODE":
            // gas, addr, val, memin, meminsz, memout, memoutsz
            return 3; // stack ptr to memin

        case "DELEGATECALL": case "STATICCALL":
            // gas, addr, memin, meminsz, memout, memoutsz
            return 2; // stack ptr to memin
        }
        return false;
    },

    // store saves the given identifier and datasize.
    store: function(id, size){
        var key = "" + toHex(id) + "-" + size;
        this.ids[key] = this.ids[key] + 1 || 1;
    },

    // step is invoked for every opcode that the VM executes.
    step: function(log, db) {
        // Skip any opcodes that are not internal calls
        var ct = this.callType(log.op.toString());
        if (!ct) {
            return;
        }
        // Skip any pre-compile invocations, those are just fancy opcodes
        if (isPrecompiled(toAddress(log.stack.peek(1)))) {
            return;
        }
        // Gather internal call details
        var inSz = log.stack.peek(ct + 1).valueOf();
        if (inSz >= 4) {
            var inOff = log.stack.peek(ct).valueOf();
            this.store(log.memory.slice(inOff, inOff + 4), inSz-4);
        }
    },

    // fault is invoked when the actual execution of an opcode fails.
    fault: function(log, db) { },

    // result is invoked when all the opcodes have been iterated over and returns
    // the final result of the tracing.
    result: function(ctx) {
        // Save the outer calldata also
        if (ctx.input.length > 4) {
            this.store(slice(ctx.input, 0, 4), ctx.input.length-4)
        }
        return this.ids;
    },
}
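The Go-side wiring that reaches a bundled tracer like the one above is the same path traceTx in eth/api_tracer.go takes; a condensed, hypothetical sketch (the helper name and its parameters are assumptions, and the imports are the same ones eth/api_tracer.go uses):

// run4byteTracer is a sketch only: it looks the bundled tracer up by the same
// name the console example above uses and wires it into an EVM run.
func run4byteTracer(msg core.Message, vmctx vm.Context, statedb *state.StateDB, chainConfig *params.ChainConfig) (interface{}, error) {
    tracer, err := tracers.New("4byteTracer")
    if err != nil {
        return nil, err
    }
    // Hook the tracer into the EVM exactly as traceTx does: Debug mode plus the Tracer field.
    vmenv := vm.NewEVM(vmctx, statedb, chainConfig, vm.Config{Debug: true, Tracer: tracer})
    if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
        return nil, err
    }
    // GetResult returns whatever the tracer's result() function produced.
    return tracer.GetResult()
}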
eth/tracers/internal/tracers/assets.go (new file, 350 lines)
File diff suppressed because one or more lines are too long
eth/tracers/internal/tracers/call_tracer.js (new file, 246 lines)
@@ -0,0 +1,246 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// callTracer is a full blown transaction tracer that extracts and reports all
|
||||
// the internal calls made by a transaction, along with any useful information.
|
||||
{
|
||||
// callstack is the current recursive call stack of the EVM execution.
|
||||
callstack: [{}],
|
||||
|
||||
// descended tracks whether we've just descended from an outer transaction into
|
||||
// an inner call.
|
||||
descended: false,
|
||||
|
||||
// step is invoked for every opcode that the VM executes.
|
||||
step: function(log, db) {
|
||||
// Capture any errors immediately
|
||||
var error = log.getError();
|
||||
if (error !== undefined) {
|
||||
this.fault(log, db);
|
||||
return;
|
||||
}
|
||||
// We only care about system opcodes, faster if we pre-check once
|
||||
var syscall = (log.op.toNumber() & 0xf0) == 0xf0;
|
||||
if (syscall) {
|
||||
var op = log.op.toString();
|
||||
}
|
||||
// If a new contract is being created, add to the call stack
|
||||
if (syscall && op == 'CREATE') {
|
||||
var inOff = log.stack.peek(1).valueOf();
|
||||
var inEnd = inOff + log.stack.peek(2).valueOf();
|
||||
|
||||
// Assemble the internal call report and store for completion
|
||||
var call = {
|
||||
type: op,
|
||||
from: toHex(log.contract.getAddress()),
|
||||
input: toHex(log.memory.slice(inOff, inEnd)),
|
||||
gasIn: log.getGas(),
|
||||
gasCost: log.getCost(),
|
||||
value: '0x' + log.stack.peek(0).toString(16)
|
||||
};
|
||||
this.callstack.push(call);
|
||||
this.descended = true
|
||||
return;
|
||||
}
|
||||
// If a contract is being self destructed, gather that as a subcall too
|
||||
if (syscall && op == 'SELFDESTRUCT') {
|
||||
var left = this.callstack.length;
|
||||
if (this.callstack[left-1].calls === undefined) {
|
||||
this.callstack[left-1].calls = [];
|
||||
}
|
||||
this.callstack[left-1].calls.push({type: op});
|
||||
return
|
||||
}
|
||||
// If a new method invocation is being done, add to the call stack
|
||||
if (syscall && (op == 'CALL' || op == 'CALLCODE' || op == 'DELEGATECALL' || op == 'STATICCALL')) {
|
||||
// Skip any pre-compile invocations, those are just fancy opcodes
|
||||
var to = toAddress(log.stack.peek(1).toString(16));
|
||||
if (isPrecompiled(to)) {
|
||||
return
|
||||
}
|
||||
var off = (op == 'DELEGATECALL' || op == 'STATICCALL' ? 0 : 1);
|
||||
|
||||
var inOff = log.stack.peek(2 + off).valueOf();
|
||||
var inEnd = inOff + log.stack.peek(3 + off).valueOf();
|
||||
|
||||
// Assemble the internal call report and store for completion
|
||||
var call = {
|
||||
type: op,
|
||||
from: toHex(log.contract.getAddress()),
|
||||
to: toHex(to),
|
||||
input: toHex(log.memory.slice(inOff, inEnd)),
|
||||
gasIn: log.getGas(),
|
||||
gasCost: log.getCost(),
|
||||
outOff: log.stack.peek(4 + off).valueOf(),
|
||||
outLen: log.stack.peek(5 + off).valueOf()
|
||||
};
|
||||
if (op != 'DELEGATECALL' && op != 'STATICCALL') {
|
||||
call.value = '0x' + log.stack.peek(2).toString(16);
|
||||
}
|
||||
this.callstack.push(call);
|
||||
this.descended = true;
|
||||
return;
|
||||
}
|
||||
// If we've just descended into an inner call, retrieve its true allowance. We
|
||||
// need to extract it from within the call as there may be funky gas dynamics
|
||||
// with regard to requested and actually given gas (2300 stipend, 63/64 rule).
|
||||
if (this.descended) {
|
||||
if (log.getDepth() >= this.callstack.length) {
|
||||
this.callstack[this.callstack.length - 1].gas = log.getGas();
|
||||
} else {
|
||||
// TODO(karalabe): The call was made to a plain account. We currently don't
|
||||
// have access to the true gas amount inside the call and so any amount will
|
||||
// mostly be wrong since it depends on a lot of input args. Skip gas for now.
|
||||
}
|
||||
this.descended = false;
|
||||
}
|
||||
// If an existing call is returning, pop off the call stack
|
||||
if (syscall && op == 'REVERT') {
|
||||
this.callstack[this.callstack.length - 1].error = "execution reverted";
|
||||
return;
|
||||
}
|
||||
if (log.getDepth() == this.callstack.length - 1) {
|
||||
// Pop off the last call and get the execution results
|
||||
var call = this.callstack.pop();
|
||||
|
||||
if (call.type == 'CREATE') {
|
||||
// If the call was a CREATE, retrieve the contract address and output code
|
||||
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
|
||||
delete call.gasIn; delete call.gasCost;
|
||||
|
||||
var ret = log.stack.peek(0);
|
||||
if (!ret.equals(0)) {
|
||||
call.to = toHex(toAddress(ret.toString(16)));
|
||||
call.output = toHex(db.getCode(toAddress(ret.toString(16))));
|
||||
} else if (call.error === undefined) {
|
||||
call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
|
||||
}
|
||||
} else {
|
||||
// If the call was a contract call, retrieve the gas usage and output
|
||||
if (call.gas !== undefined) {
|
||||
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16);
|
||||
|
||||
var ret = log.stack.peek(0);
|
||||
if (!ret.equals(0)) {
|
||||
call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));
|
||||
} else if (call.error === undefined) {
|
||||
call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
|
||||
}
|
||||
}
|
||||
delete call.gasIn; delete call.gasCost;
|
||||
delete call.outOff; delete call.outLen;
|
||||
}
|
||||
if (call.gas !== undefined) {
|
||||
call.gas = '0x' + bigInt(call.gas).toString(16);
|
||||
}
|
||||
// Inject the call into the previous one
|
||||
var left = this.callstack.length;
|
||||
if (this.callstack[left-1].calls === undefined) {
|
||||
this.callstack[left-1].calls = [];
|
||||
}
|
||||
this.callstack[left-1].calls.push(call);
|
||||
}
|
||||
},
|
||||
|
||||
// fault is invoked when the actual execution of an opcode fails.
|
||||
fault: function(log, db) {
|
||||
// If the topmost call already reverted, don't handle the additional fault again
|
||||
if (this.callstack[this.callstack.length - 1].error !== undefined) {
|
||||
return;
|
||||
}
|
||||
// Pop off the just failed call
|
||||
var call = this.callstack.pop();
|
||||
call.error = log.getError();
|
||||
|
||||
// Consume all available gas and clean any leftovers
|
||||
if (call.gas !== undefined) {
|
||||
call.gas = '0x' + bigInt(call.gas).toString(16);
|
||||
call.gasUsed = call.gas;
|
||||
}
|
||||
delete call.gasIn; delete call.gasCost;
|
||||
delete call.outOff; delete call.outLen;
|
||||
|
||||
// Flatten the failed call into its parent
|
||||
var left = this.callstack.length;
|
||||
if (left > 0) {
|
||||
if (this.callstack[left-1].calls === undefined) {
|
||||
this.callstack[left-1].calls = [];
|
||||
}
|
||||
this.callstack[left-1].calls.push(call);
|
||||
return;
|
||||
}
|
||||
// Last call failed too, leave it in the stack
|
||||
this.callstack.push(call);
|
||||
},
|
||||
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function(ctx, db) {
|
||||
var result = {
|
||||
type: ctx.type,
|
||||
from: toHex(ctx.from),
|
||||
to: toHex(ctx.to),
|
||||
value: '0x' + ctx.value.toString(16),
|
||||
gas: '0x' + bigInt(ctx.gas).toString(16),
|
||||
gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
|
||||
input: toHex(ctx.input),
|
||||
output: toHex(ctx.output),
|
||||
time: ctx.time,
|
||||
};
|
||||
if (this.callstack[0].calls !== undefined) {
|
||||
result.calls = this.callstack[0].calls;
|
||||
}
|
||||
if (this.callstack[0].error !== undefined) {
|
||||
result.error = this.callstack[0].error;
|
||||
} else if (ctx.error !== undefined) {
|
||||
result.error = ctx.error;
|
||||
}
|
||||
if (result.error !== undefined) {
|
||||
delete result.output;
|
||||
}
|
||||
return this.finalize(result);
|
||||
},
|
||||
|
||||
// finalize recreates a call object using the final desired field order for JSON
|
||||
// serialization. This is a nicety to pass meaningfully ordered results
|
||||
// to users who don't interpret the output, just display it.
|
||||
finalize: function(call) {
|
||||
var sorted = {
|
||||
type: call.type,
|
||||
from: call.from,
|
||||
to: call.to,
|
||||
value: call.value,
|
||||
gas: call.gas,
|
||||
gasUsed: call.gasUsed,
|
||||
input: call.input,
|
||||
output: call.output,
|
||||
error: call.error,
|
||||
time: call.time,
|
||||
calls: call.calls,
|
||||
}
|
||||
for (var key in sorted) {
|
||||
if (sorted[key] === undefined) {
|
||||
delete sorted[key];
|
||||
}
|
||||
}
|
||||
if (sorted.calls !== undefined) {
|
||||
for (var i=0; i<sorted.calls.length; i++) {
|
||||
sorted.calls[i] = this.finalize(sorted.calls[i]);
|
||||
}
|
||||
}
|
||||
return sorted;
|
||||
}
|
||||
}
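For orientation, a minimal console sketch of how this tracer is intended to be invoked once the change lands (the transaction hash below is a placeholder, not from this change):

// "callTracer" is the camel-cased name derived from call_tracer.js (see eth/tracers/tracers.go below).
var trace = debug.traceTransaction("0x...", {tracer: "callTracer"});
// Top-level frame, plus any nested internal calls under trace.calls.
console.log(trace.type, trace.from, trace.to, trace.gasUsed, (trace.calls || []).length);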
|
93
eth/tracers/internal/tracers/evmdis_tracer.js
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// evmdisTracer returns sufficient information from a trace to perform evmdis-style
|
||||
// disassembly.
|
||||
{
|
||||
stack: [{ops: []}],
|
||||
|
||||
npushes: {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 1, 22: 1, 23: 1, 24: 1, 25: 1, 26: 1, 32: 1, 48: 1, 49: 1, 50: 1, 51: 1, 52: 1, 53: 1, 54: 1, 55: 0, 56: 1, 57: 0, 58: 1, 59: 1, 60: 0, 64: 1, 65: 1, 66: 1, 67: 1, 68: 1, 69: 1, 80: 0, 81: 1, 82: 0, 83: 0, 84: 1, 85: 0, 86: 0, 87: 0, 88: 1, 89: 1, 90: 1, 91: 0, 96: 1, 97: 1, 98: 1, 99: 1, 100: 1, 101: 1, 102: 1, 103: 1, 104: 1, 105: 1, 106: 1, 107: 1, 108: 1, 109: 1, 110: 1, 111: 1, 112: 1, 113: 1, 114: 1, 115: 1, 116: 1, 117: 1, 118: 1, 119: 1, 120: 1, 121: 1, 122: 1, 123: 1, 124: 1, 125: 1, 126: 1, 127: 1, 128: 2, 129: 3, 130: 4, 131: 5, 132: 6, 133: 7, 134: 8, 135: 9, 136: 10, 137: 11, 138: 12, 139: 13, 140: 14, 141: 15, 142: 16, 143: 17, 144: 2, 145: 3, 146: 4, 147: 5, 148: 6, 149: 7, 150: 8, 151: 9, 152: 10, 153: 11, 154: 12, 155: 13, 156: 14, 157: 15, 158: 16, 159: 17, 160: 0, 161: 0, 162: 0, 163: 0, 164: 0, 240: 1, 241: 1, 242: 1, 243: 0, 244: 0, 255: 0},
|
||||
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function() { return this.stack[0].ops; },
|
||||
|
||||
// fault is invoked when the actual execution of an opcode fails.
|
||||
fault: function(log, db) { },
|
||||
|
||||
// step is invoked for every opcode that the VM executes.
|
||||
step: function(log, db) {
|
||||
var frame = this.stack[this.stack.length - 1];
|
||||
|
||||
var error = log.getError();
|
||||
if (error) {
|
||||
frame["error"] = error;
|
||||
} else if (log.getDepth() == this.stack.length) {
|
||||
var opinfo = {
|
||||
op: log.op.toNumber(),
|
||||
depth : log.getDepth(),
|
||||
result: [],
|
||||
};
|
||||
if (frame.ops.length > 0) {
|
||||
var prevop = frame.ops[frame.ops.length - 1];
|
||||
for(var i = 0; i < this.npushes[prevop.op]; i++)
|
||||
prevop.result.push(log.stack.peek(i).toString(16));
|
||||
}
|
||||
switch(log.op.toString()) {
|
||||
case "CALL": case "CALLCODE":
|
||||
var instart = log.stack.peek(3).valueOf();
|
||||
var insize = log.stack.peek(4).valueOf();
|
||||
opinfo["gas"] = log.stack.peek(0).valueOf();
|
||||
opinfo["to"] = log.stack.peek(1).toString(16);
|
||||
opinfo["value"] = log.stack.peek(2).toString();
|
||||
opinfo["input"] = log.memory.slice(instart, instart + insize);
|
||||
opinfo["error"] = null;
|
||||
opinfo["return"] = null;
|
||||
opinfo["ops"] = [];
|
||||
this.stack.push(opinfo);
|
||||
break;
|
||||
case "DELEGATECALL": case "STATICCALL":
|
||||
var instart = log.stack.peek(2).valueOf();
|
||||
var insize = log.stack.peek(3).valueOf();
|
||||
opinfo["op"] = log.op.toString();
|
||||
opinfo["gas"] = log.stack.peek(0).valueOf();
|
||||
opinfo["to"] = log.stack.peek(1).toString(16);
|
||||
opinfo["input"] = log.memory.slice(instart, instart + insize);
|
||||
opinfo["error"] = null;
|
||||
opinfo["return"] = null;
|
||||
opinfo["ops"] = [];
|
||||
this.stack.push(opinfo);
|
||||
break;
|
||||
case "RETURN":
|
||||
var out = log.stack.peek(0).valueOf();
|
||||
var outsize = log.stack.peek(1).valueOf();
|
||||
frame.return = log.memory.slice(out, out + outsize);
|
||||
break;
|
||||
case "STOP": case "SUICIDE":
|
||||
frame.return = log.memory.slice(0, 0);
|
||||
break;
|
||||
case "JUMPDEST":
|
||||
opinfo["pc"] = log.getPC();
|
||||
}
|
||||
if(log.op.isPush()) {
|
||||
opinfo["len"] = log.op.toNumber() - 0x5e;
|
||||
}
|
||||
frame.ops.push(opinfo);
|
||||
} else {
|
||||
this.stack = this.stack.slice(0, log.getDepth());
|
||||
}
|
||||
}
|
||||
}
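A brief usage sketch along the same lines (placeholder hash; each record follows the opinfo objects assembled above):

var ops = debug.traceTransaction("0x...", {tracer: "evmdisTracer"});
// A flat list of opcode records from the outermost frame, suitable for evmdis-style disassembly.
console.log(ops.length, ops[0]);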
|
29
eth/tracers/internal/tracers/noop_tracer.js
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// noopTracer is just the barebone boilerplate code required from a JavaScript
|
||||
// object to be usable as a transaction tracer.
|
||||
{
|
||||
// step is invoked for every opcode that the VM executes.
|
||||
step: function(log, db) { },
|
||||
|
||||
// fault is invoked when the actual execution of an opcode fails.
|
||||
fault: function(log, db) { },
|
||||
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function(ctx, db) { }
|
||||
}
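These three methods are the entire contract a tracer must satisfy. As a sketch, assuming the tracer option also accepts a raw JavaScript object literal (the test file below constructs tracers from such strings), a one-off tracer can be supplied inline:

// Hypothetical inline tracer counting SSTORE opcodes; same skeleton as the noop tracer.
var counter = "{stores: 0, step: function(log, db) { if (log.op.toString() == 'SSTORE') this.stores++ }, fault: function(log, db) {}, result: function(ctx, db) { return this.stores }}";
debug.traceTransaction("0x...", {tracer: counter});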
|
32
eth/tracers/internal/tracers/opcount_tracer.js
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// opcountTracer is a sample tracer that just counts the number of instructions
|
||||
// executed by the EVM before the transaction terminated.
|
||||
{
|
||||
// count tracks the number of EVM instructions executed.
|
||||
count: 0,
|
||||
|
||||
// step is invoked for every opcode that the VM executes.
|
||||
step: function(log, db) { this.count++ },
|
||||
|
||||
// fault is invoked when the actual execution of an opcode fails.
|
||||
fault: function(log, db) { },
|
||||
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function(ctx, db) { return this.count }
|
||||
}
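Usage sketch (placeholder hash); the result is a single integer rather than a structured object:

var count = debug.traceTransaction("0x...", {tracer: "opcountTracer"});
console.log(count); // number of opcodes executed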
|
103
eth/tracers/internal/tracers/prestate_tracer.js
Normal file
@ -0,0 +1,103 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// prestateTracer outputs sufficient information to create a local execution of
|
||||
// the transaction from a custom assembled genesis block.
|
||||
{
|
||||
// prestate is the genesis that we're building.
|
||||
prestate: null,
|
||||
|
||||
// lookupAccount injects the specified account into the prestate object.
|
||||
lookupAccount: function(addr, db){
|
||||
var acc = toHex(addr);
|
||||
if (this.prestate[acc] === undefined) {
|
||||
this.prestate[acc] = {
|
||||
balance: '0x' + db.getBalance(addr).toString(16),
|
||||
nonce: db.getNonce(addr),
|
||||
code: toHex(db.getCode(addr)),
|
||||
storage: {}
|
||||
};
|
||||
}
|
||||
},
|
||||
|
||||
// lookupStorage injects the specified storage entry of the given account into
|
||||
// the prestate object.
|
||||
lookupStorage: function(addr, key, db){
|
||||
var acc = toHex(addr);
|
||||
var idx = toHex(key);
|
||||
|
||||
if (this.prestate[acc].storage[idx] === undefined) {
|
||||
var val = toHex(db.getState(addr, key));
|
||||
if (val != "0x0000000000000000000000000000000000000000000000000000000000000000") {
|
||||
this.prestate[acc].storage[idx] = val;
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function(ctx, db) {
|
||||
// At this point, we need to deduct the 'value' from the
|
||||
// outer transaction, and move it back to the origin
|
||||
this.lookupAccount(ctx.from, db);
|
||||
|
||||
var fromBal = bigInt(this.prestate[toHex(ctx.from)].balance.slice(2), 16);
|
||||
var toBal = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16);
|
||||
|
||||
this.prestate[toHex(ctx.to)].balance = '0x'+toBal.subtract(ctx.value).toString(16);
|
||||
this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).toString(16);
|
||||
|
||||
// Decrement the caller's nonce, and remove empty create targets
|
||||
this.prestate[toHex(ctx.from)].nonce--;
|
||||
if (ctx.type == 'CREATE') {
|
||||
// We can blindly delete the contract prestate, as any existing state would
|
||||
// have caused the transaction to be rejected as invalid in the first place.
|
||||
delete this.prestate[toHex(ctx.to)];
|
||||
}
|
||||
// Return the assembled allocations (prestate)
|
||||
return this.prestate;
|
||||
},
|
||||
|
||||
// step is invoked for every opcode that the VM executes.
|
||||
step: function(log, db) {
|
||||
// Add the current account if we just started tracing
|
||||
if (this.prestate === null){
|
||||
this.prestate = {};
|
||||
// Balance will potentially be wrong here, since this will include the value
|
||||
// sent along with the message. We fix that in 'result()'.
|
||||
this.lookupAccount(log.contract.getAddress(), db);
|
||||
}
|
||||
// Whenever new state is accessed, add it to the prestate
|
||||
switch (log.op.toString()) {
|
||||
case "EXTCODECOPY": case "EXTCODESIZE": case "BALANCE":
|
||||
this.lookupAccount(toAddress(log.stack.peek(0).toString(16)), db);
|
||||
break;
|
||||
case "CREATE":
|
||||
var from = log.contract.getAddress();
|
||||
this.lookupAccount(toContract(from, db.getNonce(from)), db);
|
||||
break;
|
||||
case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL":
|
||||
this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);
|
||||
break;
|
||||
case "SSTORE": case "SLOAD":
|
||||
this.lookupStorage(log.contract.getAddress(), toWord(log.stack.peek(0).toString(16)), db);
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
// fault is invoked when the actual execution of an opcode fails.
|
||||
fault: function(log, db) {}
|
||||
}
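As the makeTest helper further down in this change does, the prestate tracer can be used to reconstruct a genesis alloc for a transaction (placeholder hash):

var alloc = debug.traceTransaction("0x...", {tracer: "prestateTracer"});
// Each entry is keyed by address and carries balance, nonce, code and touched storage slots.
for (var addr in alloc) { console.log(addr, alloc[addr].balance, alloc[addr].nonce); }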
|
21
eth/tracers/internal/tracers/tracers.go
Normal file
@ -0,0 +1,21 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore ((tracers)|(assets)).go ./...
|
||||
//go:generate gofmt -s -w assets.go
|
||||
|
||||
// Package tracers contains the actual JavaScript tracer assets.
|
||||
package tracers
|
58
eth/tracers/testdata/call_tracer_create.json
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
{
|
||||
"context": {
|
||||
"difficulty": "3755480783",
|
||||
"gasLimit": "5401723",
|
||||
"miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511",
|
||||
"number": "2294702",
|
||||
"timestamp": "1513676146"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x13e4acefe6a6700604929946e70e6443e4e73447": {
|
||||
"balance": "0xcf3e0938579f000",
|
||||
"code": "0x",
|
||||
"nonce": "9",
|
||||
"storage": {}
|
||||
},
|
||||
"0x7dc9c9730689ff0b0fd506c67db815f12d90a448": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "0",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"daoForkSupport": true,
|
||||
"eip150Block": 0,
|
||||
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3757315409",
|
||||
"extraData": "0x566961425443",
|
||||
"gasLimit": "5406414",
|
||||
"hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d",
|
||||
"miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511",
|
||||
"mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1",
|
||||
"nonce": "0x93363bbd2c95f410",
|
||||
"number": "2294701",
|
||||
"stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c",
|
||||
"timestamp": "1513676127",
|
||||
"totalDifficulty": "7160808139332585"
|
||||
},
|
||||
"input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073fffffffffffffffffffff
fffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f",
|
||||
"result": {
|
||||
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
|
||||
"gas": "0x5e106",
|
||||
"gasUsed": "0x5e106",
|
||||
"input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c
857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
|
||||
"output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029",
|
||||
"to": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448",
|
||||
"type": "CREATE",
|
||||
"value": "0x0"
|
||||
}
|
||||
}
|
415
eth/tracers/testdata/call_tracer_deep_calls.json
vendored
Normal file
File diff suppressed because one or more lines are too long
97
eth/tracers/testdata/call_tracer_delegatecall.json
vendored
Normal file
File diff suppressed because one or more lines are too long
77
eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
vendored
Normal file
File diff suppressed because one or more lines are too long
81
eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json
vendored
Normal file
File diff suppressed because one or more lines are too long
60
eth/tracers/testdata/call_tracer_oog.json
vendored
Normal file
File diff suppressed because one or more lines are too long
58
eth/tracers/testdata/call_tracer_revert.json
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
{
|
||||
"context": {
|
||||
"difficulty": "3665057456",
|
||||
"gasLimit": "5232723",
|
||||
"miner": "0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f",
|
||||
"number": "2294501",
|
||||
"timestamp": "1513673601"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9": {
|
||||
"balance": "0x2a3fc32bcc019283",
|
||||
"code": "0x",
|
||||
"nonce": "10",
|
||||
"storage": {}
|
||||
},
|
||||
"0xabbcd5b340c80b5f1c0545c04c987b87310296ae": {
|
||||
"balance": "0x0",
|
||||
"code": "0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b61025460046024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a038088168202600284015286811682026016840152602a8301869052841602604a820152605e810182805190602001908083835b6020831061040057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b600101610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029",
|
||||
"nonce": "1",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"daoForkSupport": true,
|
||||
"eip150Block": 0,
|
||||
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3672229776",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "5227619",
|
||||
"hash": "0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0",
|
||||
"nonce": "0xbc5d43adc2c30c7d",
|
||||
"number": "2294500",
|
||||
"stateRoot": "0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d",
|
||||
"timestamp": "1513673552",
|
||||
"totalDifficulty": "7160066586979149"
|
||||
},
|
||||
"input": "0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a",
|
||||
"result": {
|
||||
"error": "execution reverted",
|
||||
"from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9",
|
||||
"gas": "0x2d55e8",
|
||||
"gasUsed": "0xc3",
|
||||
"input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae",
|
||||
"type": "CALL",
|
||||
"value": "0x0"
|
||||
}
|
||||
}
|
78
eth/tracers/testdata/call_tracer_simple.json
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
{
|
||||
"context": {
|
||||
"difficulty": "3502894804",
|
||||
"gasLimit": "4722976",
|
||||
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
|
||||
"number": "2289806",
|
||||
"timestamp": "1513601314"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "22",
|
||||
"storage": {}
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"nonce": "1",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"code": "0x",
|
||||
"nonce": "29072",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"daoForkSupport": true,
|
||||
"eip150Block": 0,
|
||||
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3509749784",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "4727564",
|
||||
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
|
||||
"nonce": "0x4eb12e19c16d43da",
|
||||
"number": "2289805",
|
||||
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
|
||||
"timestamp": "1513601261",
|
||||
"totalDifficulty": "7143276353481064"
|
||||
},
|
||||
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
|
||||
"result": {
|
||||
"calls": [
|
||||
{
|
||||
"from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
|
||||
"input": "0x",
|
||||
"to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
|
||||
"type": "CALL",
|
||||
"value": "0x6f05b59d3b20000"
|
||||
}
|
||||
],
|
||||
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"gas": "0x10738",
|
||||
"gasUsed": "0x3ef9",
|
||||
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
|
||||
"output": "0x0000000000000000000000000000000000000000000000000000000000000001",
|
||||
"to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
|
||||
"type": "CALL",
|
||||
"value": "0x0"
|
||||
}
|
||||
}
|
62
eth/tracers/testdata/call_tracer_throw.json
vendored
Normal file
File diff suppressed because one or more lines are too long
618
eth/tracers/tracer.go
Normal file
File diff suppressed because one or more lines are too long
@ -14,12 +14,13 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethapi
|
||||
package tracers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -42,8 +43,8 @@ func (account) ReturnGas(*big.Int) {}
|
||||
func (account) SetCode(common.Hash, []byte) {}
|
||||
func (account) ForEachStorage(cb func(key, value common.Hash) bool) {}
|
||||
|
||||
func runTrace(tracer *JavascriptTracer) (interface{}, error) {
|
||||
env := vm.NewEVM(vm.Context{}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
func runTrace(tracer *Tracer) (json.RawMessage, error) {
|
||||
env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
|
||||
contract := vm.NewContract(account{}, account{}, big.NewInt(0), 10000)
|
||||
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
|
||||
@ -52,12 +53,11 @@ func runTrace(tracer *JavascriptTracer) (interface{}, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tracer.GetResult()
|
||||
}
|
||||
|
||||
func TestTracing(t *testing.T) {
|
||||
tracer, err := NewJavascriptTracer("{count: 0, step: function() { this.count += 1; }, result: function() { return this.count; }}")
|
||||
tracer, err := New("{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -66,18 +66,13 @@ func TestTracing(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, ok := ret.(float64)
|
||||
if !ok {
|
||||
t.Errorf("Expected return value to be float64, was %T", ret)
|
||||
}
|
||||
if value != 3 {
|
||||
t.Errorf("Expected return value to be 3, got %v", value)
|
||||
if !bytes.Equal(ret, []byte("3")) {
|
||||
t.Errorf("Expected return value to be 3, got %s", string(ret))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStack(t *testing.T) {
|
||||
tracer, err := NewJavascriptTracer("{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, result: function() { return this.depths; }}")
|
||||
tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -86,15 +81,13 @@ func TestStack(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := []int{0, 1, 2}
|
||||
if !reflect.DeepEqual(ret, expected) {
|
||||
t.Errorf("Expected return value to be %#v, got %#v", expected, ret)
|
||||
if !bytes.Equal(ret, []byte("[0,1,2]")) {
|
||||
t.Errorf("Expected return value to be [0,1,2], got %s", string(ret))
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpcodes(t *testing.T) {
|
||||
tracer, err := NewJavascriptTracer("{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, result: function() { return this.opcodes; }}")
|
||||
tracer, err := New("{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -103,16 +96,16 @@ func TestOpcodes(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := []string{"PUSH1", "PUSH1", "STOP"}
|
||||
if !reflect.DeepEqual(ret, expected) {
|
||||
t.Errorf("Expected return value to be %#v, got %#v", expected, ret)
|
||||
if !bytes.Equal(ret, []byte("[\"PUSH1\",\"PUSH1\",\"STOP\"]")) {
|
||||
t.Errorf("Expected return value to be [\"PUSH1\",\"PUSH1\",\"STOP\"], got %s", string(ret))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHalt(t *testing.T) {
|
||||
t.Skip("duktape doesn't support abortion")
|
||||
|
||||
timeout := errors.New("stahp")
|
||||
tracer, err := NewJavascriptTracer("{step: function() { while(1); }, result: function() { return null; }}")
|
||||
tracer, err := New("{step: function() { while(1); }, result: function() { return null; }}")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -128,12 +121,12 @@ func TestHalt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHaltBetweenSteps(t *testing.T) {
|
||||
tracer, err := NewJavascriptTracer("{step: function() {}, result: function() { return null; }}")
|
||||
tracer, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }}")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
env := vm.NewEVM(vm.Context{}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
contract := vm.NewContract(&account{}, &account{}, big.NewInt(0), 0)
|
||||
|
||||
tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, contract, 0, nil)
|
||||
@ -141,7 +134,7 @@ func TestHaltBetweenSteps(t *testing.T) {
|
||||
tracer.Stop(timeout)
|
||||
tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, contract, 0, nil)
|
||||
|
||||
if _, err := tracer.GetResult(); err.Error() != "stahp in server-side tracer function 'step'" {
|
||||
if _, err := tracer.GetResult(); err.Error() != timeout.Error() {
|
||||
t.Errorf("Expected timeout error, got %v", err)
|
||||
}
|
||||
}
|
53
eth/tracers/tracers.go
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package tracers is a collection of JavaScript transaction tracers.
|
||||
package tracers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/internal/tracers"
|
||||
)
|
||||
|
||||
// all contains all the built in JavaScript tracers by name.
|
||||
var all = make(map[string]string)
|
||||
|
||||
// camel converts a snake cased input string into a camel cased output.
|
||||
func camel(str string) string {
|
||||
pieces := strings.Split(str, "_")
|
||||
for i := 1; i < len(pieces); i++ {
|
||||
pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:]
|
||||
}
|
||||
return strings.Join(pieces, "")
|
||||
}
|
||||
|
||||
// init retrieves the JavaScript transaction tracers included in go-ethereum.
|
||||
func init() {
|
||||
for _, file := range tracers.AssetNames() {
|
||||
name := camel(strings.TrimSuffix(file, ".js"))
|
||||
all[name] = string(tracers.MustAsset(file))
|
||||
}
|
||||
}
|
||||
|
||||
// tracer retrieves a specific JavaScript tracer by name.
|
||||
func tracer(name string) (string, bool) {
|
||||
if tracer, ok := all[name]; ok {
|
||||
return tracer, true
|
||||
}
|
||||
return "", false
|
||||
}
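To make the camel/init mapping above concrete (a sketch, not part of the diff): each bundled asset is registered under the camel-cased form of its file name, and that name is what the tracer option accepts:

// call_tracer.js     -> "callTracer"
// prestate_tracer.js -> "prestateTracer"
// evmdis_tracer.js   -> "evmdisTracer"
debug.traceTransaction("0x...", {tracer: "callTracer"});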
|
194
eth/tracers/tracers_test.go
Normal file
@ -0,0 +1,194 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package tracers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
)
|
||||
|
||||
// To generate a new callTracer test, copy paste the makeTest method below into
// a Geth console and call it with a transaction hash you wish to export.
|
||||
|
||||
/*
|
||||
// makeTest generates a callTracer test by running a prestate reassembly and a
// call trace run, assembling all the gathered information into a test case.
|
||||
var makeTest = function(tx, rewind) {
|
||||
// Generate the genesis block from the block, transaction and prestate data
|
||||
var block = eth.getBlock(eth.getTransaction(tx).blockHash);
|
||||
var genesis = eth.getBlock(block.parentHash);
|
||||
|
||||
delete genesis.gasUsed;
|
||||
delete genesis.logsBloom;
|
||||
delete genesis.parentHash;
|
||||
delete genesis.receiptsRoot;
|
||||
delete genesis.sha3Uncles;
|
||||
delete genesis.size;
|
||||
delete genesis.transactions;
|
||||
delete genesis.transactionsRoot;
|
||||
delete genesis.uncles;
|
||||
|
||||
genesis.gasLimit = genesis.gasLimit.toString();
|
||||
genesis.number = genesis.number.toString();
|
||||
genesis.timestamp = genesis.timestamp.toString();
|
||||
|
||||
genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind});
|
||||
for (var key in genesis.alloc) {
|
||||
genesis.alloc[key].nonce = genesis.alloc[key].nonce.toString();
|
||||
}
|
||||
genesis.config = admin.nodeInfo.protocols.eth.config;
|
||||
|
||||
// Generate the call trace and produce the test input
|
||||
var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind});
|
||||
delete result.time;
|
||||
|
||||
console.log(JSON.stringify({
|
||||
genesis: genesis,
|
||||
context: {
|
||||
number: block.number.toString(),
|
||||
difficulty: block.difficulty,
|
||||
timestamp: block.timestamp.toString(),
|
||||
gasLimit: block.gasLimit.toString(),
|
||||
miner: block.miner,
|
||||
},
|
||||
input: eth.getRawTransaction(tx),
|
||||
result: result,
|
||||
}, null, 2));
|
||||
}
|
||||
*/
|
||||
|
||||
// callTrace is the result of a callTracer run.
|
||||
type callTrace struct {
|
||||
Type string `json:"type"`
|
||||
From common.Address `json:"from"`
|
||||
To common.Address `json:"to"`
|
||||
Input hexutil.Bytes `json:"input"`
|
||||
Output hexutil.Bytes `json:"output"`
|
||||
Gas *hexutil.Uint64 `json:"gas,omitempty"`
|
||||
GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"`
|
||||
Value *hexutil.Big `json:"value,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Calls []callTrace `json:"calls,omitempty"`
|
||||
}
|
||||
|
||||
type callContext struct {
|
||||
Number math.HexOrDecimal64 `json:"number"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
|
||||
Time math.HexOrDecimal64 `json:"timestamp"`
|
||||
GasLimit math.HexOrDecimal64 `json:"gasLimit"`
|
||||
Miner common.Address `json:"miner"`
|
||||
}
|
||||
|
||||
// callTracerTest defines a single test to check the call tracer against.
|
||||
type callTracerTest struct {
|
||||
Genesis *core.Genesis `json:"genesis"`
|
||||
Context *callContext `json:"context"`
|
||||
Input string `json:"input"`
|
||||
Result *callTrace `json:"result"`
|
||||
}
|
||||
|
||||
// Iterates over all the input-output datasets in the tracer test harness and
|
||||
// runs the JavaScript tracers against them.
|
||||
func TestCallTracer(t *testing.T) {
|
||||
files, err := ioutil.ReadDir("testdata")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve tracer test suite: %v", err)
|
||||
}
|
||||
for _, file := range files {
|
||||
if !strings.HasPrefix(file.Name(), "call_tracer_") {
|
||||
continue
|
||||
}
|
||||
file := file // capture range variable
|
||||
t.Run(camel(strings.TrimSuffix(strings.TrimPrefix(file.Name(), "call_tracer_"), ".json")), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Call tracer test found, read it from disk
|
||||
blob, err := ioutil.ReadFile(filepath.Join("testdata", file.Name()))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read testcase: %v", err)
|
||||
}
|
||||
test := new(callTracerTest)
|
||||
if err := json.Unmarshal(blob, test); err != nil {
|
||||
t.Fatalf("failed to parse testcase: %v", err)
|
||||
}
|
||||
// Configure a blockchain with the given prestate
|
||||
tx := new(types.Transaction)
|
||||
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
|
||||
t.Fatalf("failed to parse testcase input: %v", err)
|
||||
}
|
||||
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
|
||||
origin, _ := signer.Sender(tx)
|
||||
|
||||
context := vm.Context{
|
||||
CanTransfer: core.CanTransfer,
|
||||
Transfer: core.Transfer,
|
||||
Origin: origin,
|
||||
Coinbase: test.Context.Miner,
|
||||
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
|
||||
Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
|
||||
Difficulty: (*big.Int)(test.Context.Difficulty),
|
||||
GasLimit: new(big.Int).SetUint64(uint64(test.Context.GasLimit)),
|
||||
GasPrice: tx.GasPrice(),
|
||||
}
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb := tests.MakePreState(db, test.Genesis.Alloc)
|
||||
|
||||
// Create the tracer, the EVM environment and run it
|
||||
tracer, err := New("callTracer")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create call tracer: %v", err)
|
||||
}
|
||||
evm := vm.NewEVM(context, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
|
||||
|
||||
msg, err := tx.AsMessage(signer)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||
}
|
||||
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if _, _, _, _, err = st.TransitionDb(); err != nil {
|
||||
t.Fatalf("failed to execute transaction: %v", err)
|
||||
}
|
||||
// Retrieve the trace result and compare against the etalon
|
||||
res, err := tracer.GetResult()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve trace result: %v", err)
|
||||
}
|
||||
ret := new(callTrace)
|
||||
if err := json.Unmarshal(res, ret); err != nil {
|
||||
t.Fatalf("failed to unmarshal trace result: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(ret, test.Result) {
|
||||
t.Fatalf("trace mismatch: have %+v, want %+v", ret, test.Result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
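
Each `testdata/call_tracer_*.json` fixture decodes into the `callTracerTest` struct above before the transaction is re-executed. The following is a minimal sketch of that decode step, using simplified stand-in structs and placeholder values rather than the real go-ethereum types, just to show the shape of the data the harness expects.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the callTracerTest/callTrace types; the real
// structs use go-ethereum's common and hexutil types.
type sketchCall struct {
	Type  string       `json:"type"`
	From  string       `json:"from"`
	To    string       `json:"to"`
	Calls []sketchCall `json:"calls,omitempty"`
}

type sketchTest struct {
	Input  string      `json:"input"`
	Result *sketchCall `json:"result"`
}

func main() {
	// A hypothetical, minimal testdata blob in the same shape as the
	// call_tracer_*.json fixtures (values are placeholders).
	blob := []byte(`{
		"input":  "0xf86b...",
		"result": {"type": "CALL", "from": "0xaaaa", "to": "0xbbbb", "calls": []}
	}`)
	test := new(sketchTest)
	if err := json.Unmarshal(blob, test); err != nil {
		panic(err)
	}
	fmt.Println(test.Result.Type, "from", test.Result.From, "to", test.Result.To)
}
```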
|
@ -37,6 +37,12 @@ func NewMemDatabase() (*MemDatabase, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewMemDatabaseWithCap(size int) (*MemDatabase, error) {
|
||||
return &MemDatabase{
|
||||
db: make(map[string][]byte, size),
|
||||
}, nil
|
||||
}
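
NewMemDatabaseWithCap pre-sizes the backing map, presumably so callers that know roughly how many entries they will write can avoid repeated map growth. A rough usage sketch, assuming the ethdb import path used in this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// Pre-size the in-memory database when the entry count is known up front.
	db, err := ethdb.NewMemDatabaseWithCap(1024)
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(db.Len()) // 1
}
```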
|
||||
|
||||
func (db *MemDatabase) Put(key []byte, value []byte) error {
|
||||
db.lock.Lock()
|
||||
defer db.lock.Unlock()
|
||||
@ -74,14 +80,6 @@ func (db *MemDatabase) Keys() [][]byte {
|
||||
return keys
|
||||
}
|
||||
|
||||
/*
|
||||
func (db *MemDatabase) GetKeys() []*common.Key {
|
||||
data, _ := db.Get([]byte("KeyRing"))
|
||||
|
||||
return []*common.Key{common.NewKeyFromBytes(data)}
|
||||
}
|
||||
*/
|
||||
|
||||
func (db *MemDatabase) Delete(key []byte) error {
|
||||
db.lock.Lock()
|
||||
defer db.lock.Unlock()
|
||||
@ -96,6 +94,8 @@ func (db *MemDatabase) NewBatch() Batch {
|
||||
return &memBatch{db: db}
|
||||
}
|
||||
|
||||
func (db *MemDatabase) Len() int { return len(db.db) }
|
||||
|
||||
type kv struct{ k, v []byte }
|
||||
|
||||
type memBatch struct {
|
||||
|
@ -1,364 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
// fakeBig is used to provide an interface to Javascript for 'big.NewInt'
|
||||
type fakeBig struct{}
|
||||
|
||||
// NewInt creates a new big.Int with the specified int64 value.
|
||||
func (fb *fakeBig) NewInt(x int64) *big.Int {
|
||||
return big.NewInt(x)
|
||||
}
|
||||
|
||||
// OpCodeWrapper provides a JavaScript-friendly wrapper around OpCode, to convince Otto to treat it
|
||||
// as an object, instead of a number.
|
||||
type opCodeWrapper struct {
|
||||
op vm.OpCode
|
||||
}
|
||||
|
||||
// toNumber returns the ID of this opcode as an integer
|
||||
func (ocw *opCodeWrapper) toNumber() int {
|
||||
return int(ocw.op)
|
||||
}
|
||||
|
||||
// toString returns the string representation of the opcode
|
||||
func (ocw *opCodeWrapper) toString() string {
|
||||
return ocw.op.String()
|
||||
}
|
||||
|
||||
// isPush returns true if the op is a Push
|
||||
func (ocw *opCodeWrapper) isPush() bool {
|
||||
return ocw.op.IsPush()
|
||||
}
|
||||
|
||||
// MarshalJSON serializes the opcode as JSON
|
||||
func (ocw *opCodeWrapper) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(ocw.op.String())
|
||||
}
|
||||
|
||||
// toValue returns an otto.Value for the opCodeWrapper
|
||||
func (ocw *opCodeWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
value, _ := vm.ToValue(ocw)
|
||||
obj := value.Object()
|
||||
obj.Set("toNumber", ocw.toNumber)
|
||||
obj.Set("toString", ocw.toString)
|
||||
obj.Set("isPush", ocw.isPush)
|
||||
return value
|
||||
}
|
||||
|
||||
// memoryWrapper provides a JS wrapper around vm.Memory
|
||||
type memoryWrapper struct {
|
||||
memory *vm.Memory
|
||||
}
|
||||
|
||||
// slice returns the requested range of memory as a byte slice
|
||||
func (mw *memoryWrapper) slice(begin, end int64) []byte {
|
||||
return mw.memory.Get(begin, end-begin)
|
||||
}
|
||||
|
||||
// getUint returns the 32 bytes at the specified address interpreted
|
||||
// as an unsigned integer
|
||||
func (mw *memoryWrapper) getUint(addr int64) *big.Int {
|
||||
ret := big.NewInt(0)
|
||||
ret.SetBytes(mw.memory.GetPtr(addr, 32))
|
||||
return ret
|
||||
}
|
||||
|
||||
// toValue returns an otto.Value for the memoryWrapper
|
||||
func (mw *memoryWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
value, _ := vm.ToValue(mw)
|
||||
obj := value.Object()
|
||||
obj.Set("slice", mw.slice)
|
||||
obj.Set("getUint", mw.getUint)
|
||||
return value
|
||||
}
|
||||
|
||||
// stackWrapper provides a JS wrapper around vm.Stack
|
||||
type stackWrapper struct {
|
||||
stack *vm.Stack
|
||||
}
|
||||
|
||||
// peek returns the nth-from-the-top element of the stack.
|
||||
func (sw *stackWrapper) peek(idx int) *big.Int {
|
||||
return sw.stack.Data()[len(sw.stack.Data())-idx-1]
|
||||
}
|
||||
|
||||
// length returns the length of the stack
|
||||
func (sw *stackWrapper) length() int {
|
||||
return len(sw.stack.Data())
|
||||
}
|
||||
|
||||
// toValue returns an otto.Value for the stackWrapper
|
||||
func (sw *stackWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
value, _ := vm.ToValue(sw)
|
||||
obj := value.Object()
|
||||
obj.Set("peek", sw.peek)
|
||||
obj.Set("length", sw.length)
|
||||
return value
|
||||
}
|
||||
|
||||
// dbWrapper provides a JS wrapper around vm.Database
|
||||
type dbWrapper struct {
|
||||
db vm.StateDB
|
||||
}
|
||||
|
||||
// getBalance retrieves an account's balance
|
||||
func (dw *dbWrapper) getBalance(addr []byte) *big.Int {
|
||||
return dw.db.GetBalance(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getNonce retrieves an account's nonce
|
||||
func (dw *dbWrapper) getNonce(addr []byte) uint64 {
|
||||
return dw.db.GetNonce(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getCode retrieves an account's code
|
||||
func (dw *dbWrapper) getCode(addr []byte) []byte {
|
||||
return dw.db.GetCode(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getState retrieves an account's state data for the given hash
|
||||
func (dw *dbWrapper) getState(addr []byte, hash common.Hash) common.Hash {
|
||||
return dw.db.GetState(common.BytesToAddress(addr), hash)
|
||||
}
|
||||
|
||||
// exists returns true iff the account exists
|
||||
func (dw *dbWrapper) exists(addr []byte) bool {
|
||||
return dw.db.Exist(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// toValue returns an otto.Value for the dbWrapper
|
||||
func (dw *dbWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
value, _ := vm.ToValue(dw)
|
||||
obj := value.Object()
|
||||
obj.Set("getBalance", dw.getBalance)
|
||||
obj.Set("getNonce", dw.getNonce)
|
||||
obj.Set("getCode", dw.getCode)
|
||||
obj.Set("getState", dw.getState)
|
||||
obj.Set("exists", dw.exists)
|
||||
return value
|
||||
}
|
||||
|
||||
// contractWrapper provides a JS wrapper around vm.Contract
|
||||
type contractWrapper struct {
|
||||
contract *vm.Contract
|
||||
}
|
||||
|
||||
func (c *contractWrapper) caller() common.Address {
|
||||
return c.contract.Caller()
|
||||
}
|
||||
|
||||
func (c *contractWrapper) address() common.Address {
|
||||
return c.contract.Address()
|
||||
}
|
||||
|
||||
func (c *contractWrapper) value() *big.Int {
|
||||
return c.contract.Value()
|
||||
}
|
||||
|
||||
func (c *contractWrapper) calldata() []byte {
|
||||
return c.contract.Input
|
||||
}
|
||||
|
||||
func (c *contractWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
value, _ := vm.ToValue(c)
|
||||
obj := value.Object()
|
||||
obj.Set("caller", c.caller)
|
||||
obj.Set("address", c.address)
|
||||
obj.Set("value", c.value)
|
||||
obj.Set("calldata", c.calldata)
|
||||
return value
|
||||
}
|
||||
|
||||
// JavascriptTracer provides an implementation of Tracer that evaluates a
|
||||
// Javascript function for each VM execution step.
|
||||
type JavascriptTracer struct {
|
||||
vm *otto.Otto // Javascript VM instance
|
||||
traceobj *otto.Object // User-supplied object to call
|
||||
op *opCodeWrapper // Wrapper around the VM opcode
|
||||
log map[string]interface{} // (Reusable) map for the `log` arg to `step`
|
||||
logvalue otto.Value // JS view of `log`
|
||||
memory *memoryWrapper // Wrapper around the VM memory
|
||||
stack *stackWrapper // Wrapper around the VM stack
|
||||
db *dbWrapper // Wrapper around the VM environment
|
||||
dbvalue otto.Value // JS view of `db`
|
||||
contract *contractWrapper // Wrapper around the contract object
|
||||
err error // Error, if one has occurred
|
||||
result interface{} // Final result to return to the user
|
||||
}
|
||||
|
||||
// NewJavascriptTracer instantiates a new JavascriptTracer instance.
|
||||
// code specifies a Javascript snippet, which must evaluate to an expression
|
||||
// returning an object with 'step' and 'result' functions.
|
||||
func NewJavascriptTracer(code string) (*JavascriptTracer, error) {
|
||||
vm := otto.New()
|
||||
vm.Interrupt = make(chan func(), 1)
|
||||
|
||||
// Set up builtins for this environment
|
||||
vm.Set("big", &fakeBig{})
|
||||
vm.Set("toHex", hexutil.Encode)
|
||||
|
||||
jstracer, err := vm.Object("(" + code + ")")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Check the required functions exist
|
||||
step, err := jstracer.Get("step")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !step.IsFunction() {
|
||||
return nil, fmt.Errorf("Trace object must expose a function step()")
|
||||
}
|
||||
|
||||
result, err := jstracer.Get("result")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !result.IsFunction() {
|
||||
return nil, fmt.Errorf("Trace object must expose a function result()")
|
||||
}
|
||||
// Create the persistent log object
|
||||
var (
|
||||
op = new(opCodeWrapper)
|
||||
mem = new(memoryWrapper)
|
||||
stack = new(stackWrapper)
|
||||
db = new(dbWrapper)
|
||||
contract = new(contractWrapper)
|
||||
)
|
||||
log := map[string]interface{}{
|
||||
"op": op.toValue(vm),
|
||||
"memory": mem.toValue(vm),
|
||||
"stack": stack.toValue(vm),
|
||||
"contract": contract.toValue(vm),
|
||||
}
|
||||
logvalue, _ := vm.ToValue(log)
|
||||
|
||||
return &JavascriptTracer{
|
||||
vm: vm,
|
||||
traceobj: jstracer,
|
||||
op: op,
|
||||
log: log,
|
||||
logvalue: logvalue,
|
||||
memory: mem,
|
||||
stack: stack,
|
||||
db: db,
|
||||
dbvalue: db.toValue(vm),
|
||||
contract: contract,
|
||||
err: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Stop terminates execution of any JavaScript
|
||||
func (jst *JavascriptTracer) Stop(err error) {
|
||||
jst.vm.Interrupt <- func() {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// callSafely executes a method on a JS object, catching any panics and
|
||||
// returning them as error objects.
|
||||
func (jst *JavascriptTracer) callSafely(method string, argumentList ...interface{}) (ret interface{}, err error) {
|
||||
defer func() {
|
||||
if caught := recover(); caught != nil {
|
||||
switch caught := caught.(type) {
|
||||
case error:
|
||||
err = caught
|
||||
case string:
|
||||
err = errors.New(caught)
|
||||
case fmt.Stringer:
|
||||
err = errors.New(caught.String())
|
||||
default:
|
||||
panic(caught)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
value, err := jst.traceobj.Call(method, argumentList...)
|
||||
ret, _ = value.Export()
|
||||
return ret, err
|
||||
}
|
||||
|
||||
func wrapError(context string, err error) error {
|
||||
var message string
|
||||
switch err := err.(type) {
|
||||
case *otto.Error:
|
||||
message = err.String()
|
||||
default:
|
||||
message = err.Error()
|
||||
}
|
||||
return fmt.Errorf("%v in server-side tracer function '%v'", message, context)
|
||||
}
|
||||
|
||||
// CaptureState implements the Tracer interface to trace a single step of VM execution
|
||||
func (jst *JavascriptTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
|
||||
if jst.err == nil {
|
||||
jst.op.op = op
|
||||
jst.memory.memory = memory
|
||||
jst.stack.stack = stack
|
||||
jst.db.db = env.StateDB
|
||||
jst.contract.contract = contract
|
||||
|
||||
jst.log["pc"] = pc
|
||||
jst.log["gas"] = gas
|
||||
jst.log["cost"] = cost
|
||||
jst.log["depth"] = depth
|
||||
jst.log["account"] = contract.Address()
|
||||
|
||||
delete(jst.log, "error")
|
||||
if err != nil {
|
||||
jst.log["error"] = err
|
||||
}
|
||||
_, err := jst.callSafely("step", jst.logvalue, jst.dbvalue)
|
||||
if err != nil {
|
||||
jst.err = wrapError("step", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CaptureEnd is called after the call finishes
|
||||
func (jst *JavascriptTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
|
||||
//TODO! @Arachnid please figure out if there's anything we can use this method for
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error
|
||||
func (jst *JavascriptTracer) GetResult() (result interface{}, err error) {
|
||||
if jst.err != nil {
|
||||
return nil, jst.err
|
||||
}
|
||||
|
||||
result, err = jst.callSafely("result")
|
||||
if err != nil {
|
||||
err = wrapError("result", err)
|
||||
}
|
||||
return
|
||||
}
|
@ -196,26 +196,6 @@ web3._extend({
|
||||
call: 'debug_setHead',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlock',
|
||||
call: 'debug_traceBlock',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockFromFile',
|
||||
call: 'debug_traceBlockFromFile',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockByNumber',
|
||||
call: 'debug_traceBlockByNumber',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockByHash',
|
||||
call: 'debug_traceBlockByHash',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'seedHash',
|
||||
call: 'debug_seedHash',
|
||||
@ -332,6 +312,30 @@ web3._extend({
|
||||
call: 'debug_writeMemProfile',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlock',
|
||||
call: 'debug_traceBlock',
|
||||
params: 2,
|
||||
inputFormatter: [null, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockFromFile',
|
||||
call: 'debug_traceBlockFromFile',
|
||||
params: 2,
|
||||
inputFormatter: [null, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockByNumber',
|
||||
call: 'debug_traceBlockByNumber',
|
||||
params: 2,
|
||||
inputFormatter: [null, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceBlockByHash',
|
||||
call: 'debug_traceBlockByHash',
|
||||
params: 2,
|
||||
inputFormatter: [null, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'traceTransaction',
|
||||
call: 'debug_traceTransaction',
|
||||
|
@ -127,7 +127,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD
|
||||
}
|
||||
block, _ := t.genesis(config).ToBlock()
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb := makePreState(db, t.json.Pre)
|
||||
statedb := MakePreState(db, t.json.Pre)
|
||||
|
||||
post := t.json.Post[subtest.Fork][subtest.Index]
|
||||
msg, err := t.json.Tx.toMessage(post)
|
||||
@ -158,7 +158,7 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
|
||||
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
|
||||
}
|
||||
|
||||
func makePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
|
||||
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
|
||||
sdb := state.NewDatabase(db)
|
||||
statedb, _ := state.New(common.Hash{}, sdb)
|
||||
for addr, a := range accounts {
|
||||
|
@ -80,7 +80,7 @@ type vmExecMarshaling struct {
|
||||
|
||||
func (t *VMTest) Run(vmconfig vm.Config) error {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb := makePreState(db, t.json.Pre)
|
||||
statedb := MakePreState(db, t.json.Pre)
|
||||
ret, gasRemaining, err := t.exec(statedb, vmconfig)
|
||||
|
||||
if t.json.GasRemaining == nil {
|
||||
|
vendor/gopkg.in/olebedev/go-duktape.v3/Gopkg.lock (generated, vendored, new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "v1"
|
||||
name = "gopkg.in/check.v1"
|
||||
packages = ["."]
|
||||
revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec"
|
||||
|
||||
[[projects]]
|
||||
branch = "v3"
|
||||
name = "gopkg.in/olebedev/go-duktape.v3"
|
||||
packages = ["."]
|
||||
revision = "391c1c40178e77a6003d889b96e0e41129aeb894"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "043f802c0b40e2622bf784443d3e3959f0d01e9a795e3bfe30a72060dec10c63"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
vendor/gopkg.in/olebedev/go-duktape.v3/Gopkg.toml (generated, vendored, new file, 3 lines)
@ -0,0 +1,3 @@
|
||||
[[constraint]]
|
||||
branch = "v1"
|
||||
name = "gopkg.in/check.v1"
|
vendor/gopkg.in/olebedev/go-duktape.v3/LICENSE.md (generated, vendored, new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Oleg Lebedev
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
vendor/gopkg.in/olebedev/go-duktape.v3/README.md (generated, vendored, new file, 124 lines)
@ -0,0 +1,124 @@
|
||||
# Duktape bindings for Go(Golang)
|
||||
|
||||
[![wercker status](https://app.wercker.com/status/3a5bb2e639a4b4efaf4c8bf7cab7442d/s "wercker status")](https://app.wercker.com/project/bykey/3a5bb2e639a4b4efaf4c8bf7cab7442d)
|
||||
[![Travis status](https://travis-ci.org/olebedev/go-duktape.svg?branch=v3)](https://travis-ci.org/olebedev/go-duktape)
|
||||
[![Appveyor status](https://ci.appveyor.com/api/projects/status/github/olebedev/go-duktape?branch=v3&svg=true)](https://ci.appveyor.com/project/olebedev/go-duktape/branch/v3)
|
||||
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/olebedev/go-duktape?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
|
||||
[Duktape](http://duktape.org/index.html) is a thin, embeddable javascript engine.
|
||||
Most of the [api](http://duktape.org/api.html) is implemented.
|
||||
The exceptions are listed [here](https://github.com/olebedev/go-duktape/blob/master/api.go#L1566).
|
||||
|
||||
### Usage
|
||||
|
||||
The package is fully go-getable, no need to install any external C libraries.
|
||||
So, just type `go get gopkg.in/olebedev/go-duktape.v3` to install.
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
import "gopkg.in/olebedev/go-duktape.v3"
|
||||
|
||||
func main() {
|
||||
ctx := duktape.New()
|
||||
ctx.PevalString(`2 + 3`)
|
||||
result := ctx.GetNumber(-1)
|
||||
ctx.Pop()
|
||||
fmt.Println("result is:", result)
|
||||
// To prevent memory leaks, don't forget to clean up after
|
||||
// yourself when you're done using a context.
|
||||
ctx.DestroyHeap()
|
||||
}
|
||||
```
|
||||
|
||||
### Go specific notes
|
||||
|
||||
Bindings between Go and Javascript contexts are not fully functional.
|
||||
However, binding a Go function to the Javascript context is available:
|
||||
```go
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
import "gopkg.in/olebedev/go-duktape.v3"
|
||||
|
||||
func main() {
|
||||
ctx := duktape.New()
|
||||
ctx.PushGlobalGoFunction("log", func(c *duktape.Context) int {
|
||||
fmt.Println(c.SafeToString(-1))
|
||||
return 0
|
||||
})
|
||||
ctx.PevalString(`log('Go lang Go!')`)
|
||||
}
|
||||
```
|
||||
then run it.
|
||||
```bash
|
||||
$ go run *.go
|
||||
Go lang Go!
|
||||
$
|
||||
```
|
||||
|
||||
### Timers
|
||||
|
||||
There is a method to inject timers into the global scope:
|
||||
```go
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
import "gopkg.in/olebedev/go-duktape.v3"
|
||||
|
||||
func main() {
|
||||
ctx := duktape.New()
|
||||
|
||||
// Let's inject `setTimeout`, `setInterval`, `clearTimeout`,
|
||||
// `clearInterval` into global scope.
|
||||
ctx.PushTimers()
|
||||
|
||||
ch := make(chan string)
|
||||
ctx.PushGlobalGoFunction("second", func(_ *Context) int {
|
||||
ch <- "second step"
|
||||
return 0
|
||||
})
|
||||
ctx.PevalString(`
|
||||
setTimeout(second, 0);
|
||||
print('first step');
|
||||
`)
|
||||
fmt.Println(<-ch)
|
||||
}
|
||||
```
|
||||
then run it
|
||||
```bash
|
||||
$ go run *.go
|
||||
first step
|
||||
second step
|
||||
$
|
||||
```
|
||||
|
||||
You can also call `FlushTimers()`.
|
||||
|
||||
### Command line tool
|
||||
|
||||
Install with `go get gopkg.in/olebedev/go-duktape.v3/...`.
Execute file.js: `$GOPATH/bin/go-duk file.js`.
|
||||
|
||||
### Benchmarks
|
||||
| prog | time |
|
||||
| ------------|-------|
|
||||
|[otto](https://github.com/robertkrimen/otto)|200.13s|
|
||||
|[anko](https://github.com/mattn/anko)|231.19s|
|
||||
|[agora](https://github.com/PuerkitoBio/agora/)|149.33s|
|
||||
|[GopherLua](https://github.com/yuin/gopher-lua/)|8.39s|
|
||||
|**go-duktape**|**9.80s**|
|
||||
|
||||
More details are [here](https://github.com/olebedev/go-duktape/wiki/Benchmarks).
|
||||
|
||||
### Status
|
||||
|
||||
The package is not fully tested, so be careful.
|
||||
|
||||
|
||||
### Contribution
|
||||
|
||||
Pull requests are welcome! Also, if you want to discuss something, send a pull request with a proposal and the changes.
|
||||
__Convention:__ fork the repository and make changes on your fork in a feature branch.
|
vendor/gopkg.in/olebedev/go-duktape.v3/api.go (generated, vendored, new file, 1616 lines; diff suppressed because it is too large)
vendor/gopkg.in/olebedev/go-duktape.v3/appveyor.yml (generated, vendored, new file, 34 lines)
@ -0,0 +1,34 @@
|
||||
os: Visual Studio 2015
|
||||
|
||||
clone_folder: C:\gopath\src\gopkg.in/olebedev/go-duktape.v3
|
||||
clone_depth: 5
|
||||
version: "{branch}.{build}"
|
||||
environment:
|
||||
global:
|
||||
GOPATH: C:\gopath
|
||||
CC: gcc.exe
|
||||
matrix:
|
||||
- DUKTAPE_ARCH: amd64
|
||||
MSYS2_ARCH: x86_64
|
||||
MSYS2_BITS: 64
|
||||
MSYSTEM: MINGW64
|
||||
PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH%
|
||||
- DUKTAPE_ARCH: 386
|
||||
MSYS2_ARCH: i686
|
||||
MSYS2_BITS: 32
|
||||
MSYSTEM: MINGW32
|
||||
PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH%
|
||||
|
||||
install:
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.2.windows-%DUKTAPE_ARCH%.zip
|
||||
- 7z x go1.9.2.windows-%DUKTAPE_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
build_script:
|
||||
- go get -t
|
||||
- go install ./...
|
||||
|
||||
test_script:
|
||||
- go test ./...
|
vendor/gopkg.in/olebedev/go-duktape.v3/conts.go (generated, vendored, new file, 121 lines)
@ -0,0 +1,121 @@
|
||||
package duktape
|
||||
|
||||
const (
|
||||
CompileEval uint = 1 << iota
|
||||
CompileFunction
|
||||
CompileStrict
|
||||
CompileSafe
|
||||
CompileNoResult
|
||||
CompileNoSource
|
||||
CompileStrlen
|
||||
)
|
||||
|
||||
const (
|
||||
TypeNone Type = iota
|
||||
TypeUndefined
|
||||
TypeNull
|
||||
TypeBoolean
|
||||
TypeNumber
|
||||
TypeString
|
||||
TypeObject
|
||||
TypeBuffer
|
||||
TypePointer
|
||||
TypeLightFunc
|
||||
)
|
||||
|
||||
const (
|
||||
TypeMaskNone uint = 1 << iota
|
||||
TypeMaskUndefined
|
||||
TypeMaskNull
|
||||
TypeMaskBoolean
|
||||
TypeMaskNumber
|
||||
TypeMaskString
|
||||
TypeMaskObject
|
||||
TypeMaskBuffer
|
||||
TypeMaskPointer
|
||||
TypeMaskLightFunc
|
||||
)
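
The TypeMask constants above are bit flags built with `1 << iota`, so type checks combine them with bitwise OR (as in Duktape's own duk_check_type_mask-style calls). A tiny illustrative sketch with locally mirrored copies of the constants, just to show the bit math:

```go
package main

import "fmt"

// Mirrors the 1<<iota flag layout above; these are illustrative copies,
// the real constants live in the duktape package.
const (
	TypeMaskNone uint = 1 << iota
	TypeMaskUndefined
	TypeMaskNull
	TypeMaskBoolean
	TypeMaskNumber
	TypeMaskString
)

func main() {
	// Accept either a number or a string.
	mask := TypeMaskNumber | TypeMaskString
	fmt.Printf("combined mask: %b\n", mask)
	fmt.Println("accepts number?", mask&TypeMaskNumber != 0)
	fmt.Println("accepts null?  ", mask&TypeMaskNull != 0)
}
```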
|
||||
|
||||
const (
|
||||
EnumIncludeNonenumerable uint = 1 << iota
|
||||
EnumIncludeInternal
|
||||
EnumOwnPropertiesOnly
|
||||
EnumArrayIndicesOnly
|
||||
EnumSortArrayIndices
|
||||
NoProxyBehavior
|
||||
)
|
||||
|
||||
const (
|
||||
ErrNone int = 0
|
||||
|
||||
// Internal to Duktape
|
||||
ErrUnimplemented int = 50 + iota
|
||||
ErrUnsupported
|
||||
ErrInternal
|
||||
ErrAlloc
|
||||
ErrAssertion
|
||||
ErrAPI
|
||||
ErrUncaughtError
|
||||
)
|
||||
|
||||
const (
|
||||
// Common prototypes
|
||||
ErrError int = 1 + iota
|
||||
ErrEval
|
||||
ErrRange
|
||||
ErrReference
|
||||
ErrSyntax
|
||||
ErrType
|
||||
ErrURI
|
||||
)
|
||||
|
||||
const (
|
||||
// Returned error values
|
||||
ErrRetUnimplemented int = -(ErrUnimplemented + iota)
|
||||
ErrRetUnsupported
|
||||
ErrRetInternal
|
||||
ErrRetAlloc
|
||||
ErrRetAssertion
|
||||
ErrRetAPI
|
||||
ErrRetUncaughtError
|
||||
)
|
||||
|
||||
const (
|
||||
ErrRetError int = -(ErrError + iota)
|
||||
ErrRetEval
|
||||
ErrRetRange
|
||||
ErrRetReference
|
||||
ErrRetSyntax
|
||||
ErrRetType
|
||||
ErrRetURI
|
||||
)
|
||||
|
||||
const (
|
||||
ExecSuccess = iota
|
||||
ExecError
|
||||
)
|
||||
|
||||
const (
|
||||
LogTrace int = iota
|
||||
LogDebug
|
||||
LogInfo
|
||||
LogWarn
|
||||
LogError
|
||||
LogFatal
|
||||
)
|
||||
|
||||
const (
|
||||
BufobjDuktapeBuffer = 0
BufobjNodejsBuffer = 1
|
||||
BufobjArraybuffer = 2
|
||||
BufobjDataview = 3
|
||||
BufobjInt8array = 4
|
||||
BufobjUint8array = 5
|
||||
BufobjUint8clampedarray = 6
|
||||
BufobjInt16array = 7
|
||||
BufobjUint16array = 8
|
||||
BufobjInt32array = 9
|
||||
BufobjUint32array = 10
|
||||
BufobjFloat32array = 11
|
||||
BufobjFloat64array = 12
|
||||
)
|
vendor/gopkg.in/olebedev/go-duktape.v3/duk_alloc_pool.c (generated, vendored, new executable file, 612 lines)
@ -0,0 +1,612 @@
|
||||
/*
|
||||
* Pool allocator for low memory targets.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include <stdarg.h>
|
||||
#include "duktape.h"
|
||||
#include "duk_alloc_pool.h"
|
||||
|
||||
/* Define to enable some debug printfs. */
|
||||
/* #define DUK_ALLOC_POOL_DEBUG */
|
||||
|
||||
/* Define to enable approximate waste tracking. */
|
||||
/* #define DUK_ALLOC_POOL_TRACK_WASTE */
|
||||
|
||||
/* Define to track global highwater for used and waste bytes. VERY SLOW, only
|
||||
* useful for manual testing.
|
||||
*/
|
||||
/* #define DUK_ALLOC_POOL_TRACK_HIGHWATER */
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
#if 0 /* This extern declaration is provided by duktape.h, array provided by duktape.c. */
|
||||
extern const void * const duk_rom_compressed_pointers[];
|
||||
#endif
|
||||
const void *duk_alloc_pool_romptr_low = NULL;
|
||||
const void *duk_alloc_pool_romptr_high = NULL;
|
||||
static void duk__alloc_pool_romptr_init(void);
|
||||
#endif
|
||||
|
||||
#if defined(DUK_USE_HEAPPTR16)
|
||||
void *duk_alloc_pool_ptrcomp_base = NULL;
|
||||
#endif
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
static void duk__alloc_pool_dprintf(const char *fmt, ...) {
|
||||
va_list ap;
|
||||
va_start(ap, fmt);
|
||||
vfprintf(stderr, fmt, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Pool initialization
|
||||
*/
|
||||
|
||||
void *duk_alloc_pool_init(char *buffer,
|
||||
size_t size,
|
||||
const duk_pool_config *configs,
|
||||
duk_pool_state *states,
|
||||
int num_pools,
|
||||
duk_pool_global *global) {
|
||||
double t_min, t_max, t_curr, x;
|
||||
int step, i, j, n;
|
||||
size_t total;
|
||||
char *p;
|
||||
|
||||
/* XXX: check that 'size' is not too large when using pointer
|
||||
* compression.
|
||||
*/
|
||||
|
||||
/* To optimize pool counts first come up with a 't' which still allows
|
||||
* total pool size to fit within user provided region. After that
|
||||
* sprinkle any remaining bytes to the counts. Binary search with a
|
||||
* fixed step count; last round uses 't_min' as 't_curr' to ensure it
|
||||
* succeeds.
|
||||
*/
|
||||
|
||||
t_min = 0.0; /* Unless config is insane, this should always be "good". */
|
||||
t_max = 1e6;
|
||||
|
||||
for (step = 0; ; step++) {
|
||||
if (step >= 100) {
|
||||
/* Force "known good", rerun config, and break out.
|
||||
* Deals with rounding corner cases where t_curr is
|
||||
* persistently "bad" even though t_min is a valid
|
||||
* solution.
|
||||
*/
|
||||
t_curr = t_min;
|
||||
} else {
|
||||
t_curr = (t_min + t_max) / 2.0;
|
||||
}
|
||||
|
||||
for (i = 0, total = 0; i < num_pools; i++) {
|
||||
states[i].size = configs[i].size;
|
||||
|
||||
/* Target bytes = A*t + B ==> target count = (A*t + B) / block_size.
|
||||
* Rely on A and B being small enough so that 'x' won't wrap.
|
||||
*/
|
||||
x = ((double) configs[i].a * t_curr + (double) configs[i].b) / (double) configs[i].size;
|
||||
|
||||
states[i].count = (unsigned int) x;
|
||||
total += (size_t) states[i].size * (size_t) states[i].count;
|
||||
if (total > size) {
|
||||
goto bad;
|
||||
}
|
||||
}
|
||||
|
||||
/* t_curr is good. */
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: step=%d, t=[%lf %lf %lf] -> total %ld/%ld (good)\n",
|
||||
step, t_min, t_curr, t_max, (long) total, (long) size);
|
||||
#endif
|
||||
if (step >= 100) {
|
||||
/* Keep state[] initialization state. The state was
|
||||
* created using the highest 't_min'.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
t_min = t_curr;
|
||||
continue;
|
||||
|
||||
bad:
|
||||
/* t_curr is bad. */
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: step=%d, t=[%lf %lf %lf] -> total %ld/%ld (bad)\n",
|
||||
step, t_min, t_curr, t_max, (long) total, (long) size);
|
||||
#endif
|
||||
|
||||
if (step >= 1000) {
|
||||
/* Cannot find any good solution; shouldn't happen
|
||||
* unless config is bad or 'size' is so small that
|
||||
* even a baseline allocation won't fit.
|
||||
*/
|
||||
return NULL;
|
||||
}
|
||||
t_max = t_curr;
|
||||
/* continue */
|
||||
}
|
||||
|
||||
/* The base configuration is now good; sprinkle any leftovers to
|
||||
* pools in descending order. Note that for good t_curr, 'total'
|
||||
* indicates allocated bytes so far and 'size - total' indicates
|
||||
* leftovers.
|
||||
*/
|
||||
for (i = num_pools - 1; i >= 0; i--) {
|
||||
while (size - total >= states[i].size) {
|
||||
/* Ignore potential wrapping of states[i].count as the count
|
||||
* is 32 bits and shouldn't wrap in practice.
|
||||
*/
|
||||
states[i].count++;
|
||||
total += states[i].size;
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: sprinkle %ld bytes (%ld left after) to pool index %ld, new count %ld\n",
|
||||
(long) states[i].size, (long) (size - total), (long) i, (long) states[i].count);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/* Pool counts are final. Allocate the user supplied region based
|
||||
* on the final counts, initialize free lists for each block size,
|
||||
* and otherwise finalize 'state' for use.
|
||||
*/
|
||||
p = buffer;
|
||||
global->num_pools = num_pools;
|
||||
global->states = states;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: global highwater mark tracking enabled, THIS IS VERY SLOW!\n");
|
||||
#endif
|
||||
global->hwm_used_bytes = 0U;
|
||||
global->hwm_waste_bytes = 0U;
|
||||
#endif
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_WASTE)
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: approximate waste tracking enabled\n");
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(DUK_USE_HEAPPTR16)
|
||||
/* Register global base value for pointer compression, assumes
|
||||
* a single active pool. -4 allows a single subtract to be used and
|
||||
* still ensures no non-NULL pointer encodes to zero.
|
||||
*/
|
||||
duk_alloc_pool_ptrcomp_base = (void *) (p - 4);
|
||||
#endif
|
||||
|
||||
for (i = 0; i < num_pools; i++) {
|
||||
n = (int) states[i].count;
|
||||
if (n > 0) {
|
||||
states[i].first = (duk_pool_free *) p;
|
||||
for (j = 0; j < n; j++) {
|
||||
char *p_next = p + states[i].size;
|
||||
((duk_pool_free *) p)->next =
|
||||
(j == n - 1) ? (duk_pool_free *) NULL : (duk_pool_free *) p_next;
|
||||
p = p_next;
|
||||
}
|
||||
} else {
|
||||
states[i].first = (duk_pool_free *) NULL;
|
||||
}
|
||||
states[i].alloc_end = p;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
states[i].hwm_used_count = 0;
|
||||
#endif
|
||||
/* All members of 'state' now initialized. */
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool_init: block size %5ld, count %5ld, %8ld total bytes, "
|
||||
"end %p\n",
|
||||
(long) states[i].size, (long) states[i].count,
|
||||
(long) states[i].size * (long) states[i].count,
|
||||
(void *) states[i].alloc_end);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
/* ROM pointer compression precomputation. Assumes a single active
|
||||
* pool.
|
||||
*/
|
||||
duk__alloc_pool_romptr_init();
|
||||
#endif
|
||||
|
||||
/* Use 'global' as udata. */
|
||||
return (void *) global;
|
||||
}
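
The sizing loop in duk_alloc_pool_init above binary-searches a scale parameter t so that the per-pool byte targets a*t + b, rounded down to whole blocks, fit within the supplied region, then hands leftover bytes to the largest pools first. The following is a loose, illustrative Go rendering of just that sizing search with made-up pool configs, not the allocator itself; it assumes the baseline b-byte targets already fit in the budget (the C code bails out otherwise).

```go
package main

import "fmt"

// poolConfig mirrors duk_pool_config: target bytes for a pool are a*t + b,
// converted to a whole number of blocks of the given size.
type poolConfig struct {
	size, a, b uint
}

// sizePools binary-searches the scale t so the summed pool sizes fit in
// 'budget' bytes, then sprinkles leftover bytes to the largest pools first.
func sizePools(configs []poolConfig, budget uint) []uint {
	counts := make([]uint, len(configs))
	tMin, tMax := 0.0, 1e6
	for step := 0; ; step++ {
		tCurr := (tMin + tMax) / 2.0
		if step >= 100 {
			tCurr = tMin // force the last known-good value on the final round
		}
		total, ok := uint(0), true
		for i, c := range configs {
			n := uint((float64(c.a)*tCurr + float64(c.b)) / float64(c.size))
			counts[i] = n
			total += n * c.size
			if total > budget {
				ok = false
				break
			}
		}
		if ok {
			if step >= 100 {
				// Sprinkle leftovers to pools in descending block size.
				for i := len(configs) - 1; i >= 0; i-- {
					for budget-total >= configs[i].size {
						counts[i]++
						total += configs[i].size
					}
				}
				return counts
			}
			tMin = tCurr
		} else {
			tMax = tCurr
		}
	}
}

func main() {
	configs := []poolConfig{{size: 16, a: 10, b: 100}, {size: 64, a: 5, b: 50}, {size: 256, a: 1, b: 10}}
	fmt.Println(sizePools(configs, 4096)) // block counts per pool size
}
```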
|
||||
|
||||
/*
|
||||
* Misc helpers
|
||||
*/
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_WASTE)
|
||||
static void duk__alloc_pool_set_waste_marker(void *ptr, size_t used, size_t size) {
|
||||
/* Rely on the base pointer and size being divisible by 4 and thus
|
||||
* aligned. Use 32-bit markers: a 4-byte resolution is good enough,
|
||||
* and comparing 32 bits at a time makes false waste estimates less
|
||||
* likely than when comparing as bytes.
|
||||
*/
|
||||
duk_uint32_t *p, *p_start, *p_end;
|
||||
size_t used_round;
|
||||
|
||||
used_round = (used + 3U) & ~0x03U; /* round up to 4 */
|
||||
p_end = (duk_uint32_t *) ((duk_uint8_t *) ptr + size);
|
||||
p_start = (duk_uint32_t *) ((duk_uint8_t *) ptr + used_round);
|
||||
p = (duk_uint32_t *) p_start;
|
||||
while (p != p_end) {
|
||||
*p++ = DUK_ALLOC_POOL_WASTE_MARKER;
|
||||
}
|
||||
}
|
||||
#else /* DUK_ALLOC_POOL_TRACK_WASTE */
|
||||
static void duk__alloc_pool_set_waste_marker(void *ptr, size_t used, size_t size) {
|
||||
(void) ptr; (void) used; (void) size;
|
||||
}
|
||||
#endif /* DUK_ALLOC_POOL_TRACK_WASTE */
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_WASTE)
|
||||
static size_t duk__alloc_pool_get_waste_estimate(void *ptr, size_t size) {
|
||||
duk_uint32_t *p, *p_end, *p_start;
|
||||
|
||||
/* Assumes size is >= 4. */
|
||||
p_start = (duk_uint32_t *) ptr;
|
||||
p_end = (duk_uint32_t *) ((duk_uint8_t *) ptr + size);
|
||||
p = p_end;
|
||||
|
||||
/* This scan may cause harmless valgrind complaints: there may be
|
||||
* uninitialized bytes within the legitimate allocation or between
|
||||
* the start of the waste marker and the end of the allocation.
|
||||
*/
|
||||
do {
|
||||
p--;
|
||||
if (*p == DUK_ALLOC_POOL_WASTE_MARKER) {
|
||||
;
|
||||
} else {
|
||||
return (size_t) (p_end - p - 1) * 4U;
|
||||
}
|
||||
} while (p != p_start);
|
||||
|
||||
return size;
|
||||
}
|
||||
#else /* DUK_ALLOC_POOL_TRACK_WASTE */
|
||||
static size_t duk__alloc_pool_get_waste_estimate(void *ptr, size_t size) {
|
||||
(void) ptr; (void) size;
|
||||
return 0;
|
||||
}
|
||||
#endif /* DUK_ALLOC_POOL_TRACK_WASTE */
|
||||
|
||||
static int duk__alloc_pool_ptr_in_freelist(duk_pool_state *s, void *ptr) {
|
||||
duk_pool_free *curr;
|
||||
|
||||
for (curr = s->first; curr != NULL; curr = curr->next) {
|
||||
if ((void *) curr == ptr) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void duk_alloc_pool_get_pool_stats(duk_pool_state *s, duk_pool_stats *res) {
|
||||
void *curr;
|
||||
size_t free_count;
|
||||
size_t used_count;
|
||||
size_t waste_bytes;
|
||||
|
||||
curr = s->alloc_end - (s->size * s->count);
|
||||
free_count = 0U;
|
||||
waste_bytes = 0U;
|
||||
while (curr != s->alloc_end) {
|
||||
if (duk__alloc_pool_ptr_in_freelist(s, curr)) {
|
||||
free_count++;
|
||||
} else {
|
||||
waste_bytes += duk__alloc_pool_get_waste_estimate(curr, s->size);
|
||||
}
|
||||
curr = curr + s->size;
|
||||
}
|
||||
used_count = (size_t) (s->count - free_count);
|
||||
|
||||
res->used_count = used_count;
|
||||
res->used_bytes = (size_t) (used_count * s->size);
|
||||
res->free_count = free_count;
|
||||
res->free_bytes = (size_t) (free_count * s->size);
|
||||
res->waste_bytes = waste_bytes;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
res->hwm_used_count = s->hwm_used_count;
|
||||
#else
|
||||
res->hwm_used_count = 0U;
|
||||
#endif
|
||||
}
|
||||
|
||||
void duk_alloc_pool_get_global_stats(duk_pool_global *g, duk_pool_global_stats *res) {
|
||||
int i;
|
||||
size_t total_used = 0U;
|
||||
size_t total_free = 0U;
|
||||
size_t total_waste = 0U;
|
||||
|
||||
for (i = 0; i < g->num_pools; i++) {
|
||||
duk_pool_state *s = &g->states[i];
|
||||
duk_pool_stats stats;
|
||||
|
||||
duk_alloc_pool_get_pool_stats(s, &stats);
|
||||
|
||||
total_used += stats.used_bytes;
|
||||
total_free += stats.free_bytes;
|
||||
total_waste += stats.waste_bytes;
|
||||
}
|
||||
|
||||
res->used_bytes = total_used;
|
||||
res->free_bytes = total_free;
|
||||
res->waste_bytes = total_waste;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
res->hwm_used_bytes = g->hwm_used_bytes;
|
||||
res->hwm_waste_bytes = g->hwm_waste_bytes;
|
||||
#else
|
||||
res->hwm_used_bytes = 0U;
|
||||
res->hwm_waste_bytes = 0U;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
static void duk__alloc_pool_update_highwater(duk_pool_global *g) {
|
||||
int i;
|
||||
size_t total_used = 0U;
|
||||
size_t total_free = 0U;
|
||||
size_t total_waste = 0U;
|
||||
|
||||
/* Per pool highwater used count, useful for checking if a pool is
|
||||
* too small.
|
||||
*/
|
||||
for (i = 0; i < g->num_pools; i++) {
|
||||
duk_pool_state *s = &g->states[i];
|
||||
duk_pool_stats stats;
|
||||
|
||||
duk_alloc_pool_get_pool_stats(s, &stats);
|
||||
if (stats.used_count > s->hwm_used_count) {
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk__alloc_pool_update_highwater: pool %ld (%ld bytes) highwater updated: count %ld -> %ld\n",
|
||||
(long) i, (long) s->size,
|
||||
(long) s->hwm_used_count, (long) stats.used_count);
|
||||
#endif
|
||||
s->hwm_used_count = stats.used_count;
|
||||
}
|
||||
|
||||
total_used += stats.used_bytes;
|
||||
total_free += stats.free_bytes;
|
||||
total_waste += stats.waste_bytes;
|
||||
}
|
||||
|
||||
/* Global highwater mark for used and waste bytes. Both fields are
|
||||
* updated from the same snapshot based on highest used count.
|
||||
* This is VERY, VERY slow and only useful for development.
|
||||
* (Note that updating HWM states for pools individually and then
|
||||
* summing them won't create a consistent global snapshot. There
|
||||
* are still easy ways to make this much, much faster.)
|
||||
*/
|
||||
if (total_used > g->hwm_used_bytes) {
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk__alloc_pool_update_highwater: global highwater updated: used=%ld, bytes=%ld -> "
|
||||
"used=%ld, bytes=%ld\n",
|
||||
(long) g->hwm_used_bytes, (long) g->hwm_waste_bytes,
|
||||
(long) total_used, (long) total_waste);
|
||||
#endif
|
||||
g->hwm_used_bytes = total_used;
|
||||
g->hwm_waste_bytes = total_waste;
|
||||
}
|
||||
}
|
||||
#else /* DUK_ALLOC_POOL_TRACK_HIGHWATER */
|
||||
static void duk__alloc_pool_update_highwater(duk_pool_global *g) {
|
||||
(void) g;
|
||||
}
|
||||
#endif /* DUK_ALLOC_POOL_TRACK_HIGHWATER */
|
||||
|
||||
/*
|
||||
* Allocation providers
|
||||
*/
|
||||
|
||||
void *duk_alloc_pool(void *udata, duk_size_t size) {
|
||||
duk_pool_global *g = (duk_pool_global *) udata;
|
||||
int i, n;
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_alloc_pool: %p %ld\n", udata, (long) size);
|
||||
#endif
|
||||
|
||||
if (size == 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0, n = g->num_pools; i < n; i++) {
|
||||
duk_pool_state *st = g->states + i;
|
||||
|
||||
if (size <= st->size) {
|
||||
duk_pool_free *res = st->first;
|
||||
if (res != NULL) {
|
||||
st->first = res->next;
|
||||
duk__alloc_pool_set_waste_marker((void *) res, size, st->size);
|
||||
duk__alloc_pool_update_highwater(g);
|
||||
return (void *) res;
|
||||
}
|
||||
}
|
||||
|
||||
/* Allocation doesn't fit or no free entries, try to borrow
|
||||
* from the next block size. There's no support for preventing
|
||||
* a borrow at present.
|
||||
*/
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *duk_realloc_pool(void *udata, void *ptr, duk_size_t size) {
|
||||
duk_pool_global *g = (duk_pool_global *) udata;
|
||||
int i, j, n;
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_realloc_pool: %p %p %ld\n", udata, ptr, (long) size);
|
||||
#endif
|
||||
|
||||
if (ptr == NULL) {
|
||||
return duk_alloc_pool(udata, size);
|
||||
}
|
||||
if (size == 0) {
|
||||
duk_free_pool(udata, ptr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Non-NULL pointers are necessarily from the pool so we should
|
||||
* always be able to find the allocation.
|
||||
*/
|
||||
|
||||
for (i = 0, n = g->num_pools; i < n; i++) {
|
||||
duk_pool_state *st = g->states + i;
|
||||
char *new_ptr;
|
||||
|
||||
/* Because 'ptr' is assumed to be in the pool and pools are
|
||||
* allocated in sequence, it suffices to check for end pointer
|
||||
* only.
|
||||
*/
|
||||
if ((char *) ptr >= st->alloc_end) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (size <= st->size) {
|
||||
/* Allocation still fits existing allocation. Check if
|
||||
* we can shrink the allocation to a smaller block size
|
||||
* (smallest possible).
|
||||
*/
|
||||
for (j = 0; j < i; j++) {
|
||||
duk_pool_state *st2 = g->states + j;
|
||||
|
||||
if (size <= st2->size) {
|
||||
new_ptr = (char *) st2->first;
|
||||
if (new_ptr != NULL) {
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_realloc_pool: shrink, block size %ld -> %ld\n",
|
||||
(long) st->size, (long) st2->size);
|
||||
#endif
|
||||
st2->first = ((duk_pool_free *) new_ptr)->next;
|
||||
memcpy((void *) new_ptr, (const void *) ptr, (size_t) size);
|
||||
((duk_pool_free *) ptr)->next = st->first;
|
||||
st->first = (duk_pool_free *) ptr;
|
||||
duk__alloc_pool_set_waste_marker((void *) new_ptr, size, st2->size);
|
||||
duk__alloc_pool_update_highwater(g);
|
||||
return (void *) new_ptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Failed to shrink; return existing pointer. */
|
||||
duk__alloc_pool_set_waste_marker((void *) ptr, size, st->size);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
/* Find first free larger block. */
|
||||
for (j = i + 1; j < n; j++) {
|
||||
duk_pool_state *st2 = g->states + j;
|
||||
|
||||
if (size <= st2->size) {
|
||||
new_ptr = (char *) st2->first;
|
||||
if (new_ptr != NULL) {
|
||||
st2->first = ((duk_pool_free *) new_ptr)->next;
|
||||
memcpy((void *) new_ptr, (const void *) ptr, (size_t) st->size);
|
||||
((duk_pool_free *) ptr)->next = st->first;
|
||||
st->first = (duk_pool_free *) ptr;
|
||||
duk__alloc_pool_set_waste_marker((void *) new_ptr, size, st2->size);
|
||||
duk__alloc_pool_update_highwater(g);
|
||||
return (void *) new_ptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Failed to resize. */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* We should never be here because 'ptr' should be a valid pool
|
||||
* entry and thus always found above.
|
||||
*/
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void duk_free_pool(void *udata, void *ptr) {
|
||||
duk_pool_global *g = (duk_pool_global *) udata;
|
||||
int i, n;
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_DEBUG)
|
||||
duk__alloc_pool_dprintf("duk_free_pool: %p %p\n", udata, ptr);
|
||||
#endif
|
||||
|
||||
if (ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0, n = g->num_pools; i < n; i++) {
|
||||
duk_pool_state *st = g->states + i;
|
||||
|
||||
/* Enough to check end address only. */
|
||||
if ((char *) ptr >= st->alloc_end) {
|
||||
continue;
|
||||
}
|
||||
|
||||
((duk_pool_free *) ptr)->next = st->first;
|
||||
st->first = (duk_pool_free *) ptr;
|
||||
#if 0 /* never necessary when freeing */
|
||||
duk__alloc_pool_update_highwater(g);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
/* We should never be here because 'ptr' should be a valid pool
|
||||
* entry and thus always found above.
|
||||
*/
|
||||
}
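
duk_alloc_pool and duk_free_pool above manage each block size as an intrusive singly linked freelist: alloc pops the first free block of the smallest size that fits, free pushes the block back. A toy, self-contained Go sketch of that freelist discipline for a single block size (illustrative only, not the vendored allocator):

```go
package main

import "fmt"

// block plays the role of duk_pool_free: the free-link lives inside the
// block itself, so an idle block costs nothing extra.
type block struct {
	next *block
	buf  [64]byte // illustrative fixed block size
}

type pool struct {
	first *block // head of the freelist
}

func newPool(count int) *pool {
	p := &pool{}
	blocks := make([]block, count)
	for i := range blocks {
		blocks[i].next = p.first
		p.first = &blocks[i]
	}
	return p
}

func (p *pool) alloc() *block {
	b := p.first
	if b != nil {
		p.first = b.next
	}
	return b // nil means the pool is exhausted, like duk_alloc_pool
}

func (p *pool) free(b *block) {
	b.next = p.first
	p.first = b
}

func main() {
	p := newPool(2)
	a, b := p.alloc(), p.alloc()
	fmt.Println(a != nil, b != nil, p.alloc() == nil) // true true true
	p.free(a)
	fmt.Println(p.alloc() != nil) // true again after freeing
}
```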
|
||||
|
||||
/*
|
||||
* Pointer compression
|
||||
*/
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
static void duk__alloc_pool_romptr_init(void) {
|
||||
/* Scan ROM pointer range for faster detection of "is 'p' a ROM pointer"
|
||||
* later on.
|
||||
*/
|
||||
const void * const * ptrs = (const void * const *) duk_rom_compressed_pointers;
|
||||
duk_alloc_pool_romptr_low = duk_alloc_pool_romptr_high = (const void *) *ptrs;
|
||||
while (*ptrs) {
|
||||
if (*ptrs > duk_alloc_pool_romptr_high) {
|
||||
duk_alloc_pool_romptr_high = (const void *) *ptrs;
|
||||
}
|
||||
if (*ptrs < duk_alloc_pool_romptr_low) {
|
||||
duk_alloc_pool_romptr_low = (const void *) *ptrs;
|
||||
}
|
||||
ptrs++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Encode/decode functions are defined in the header to allow inlining. */
|
||||
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
duk_uint16_t duk_alloc_pool_enc16_rom(void *ptr) {
|
||||
/* The if-condition should be the fastest possible check
|
||||
* for "is 'ptr' in ROM?". If pointer is in ROM, we'd like
|
||||
* to compress it quickly. Here we just scan a ~1K array
|
||||
* which is very bad for performance.
|
||||
*/
|
||||
const void * const * ptrs = duk_rom_compressed_pointers;
|
||||
while (*ptrs) {
|
||||
if (*ptrs == ptr) {
|
||||
return DUK_ALLOC_POOL_ROMPTR_FIRST + (duk_uint16_t) (ptrs - duk_rom_compressed_pointers);
|
||||
}
|
||||
ptrs++;
|
||||
}
|
||||
|
||||
/* We should really never be here: Duktape should only be
|
||||
* compressing pointers which are in the ROM compressed
|
||||
* pointers list, which are known at 'make dist' time.
|
||||
* We go on, causing a pointer compression error.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
#endif
|
vendor/gopkg.in/olebedev/go-duktape.v3/duk_alloc_pool.h (generated, vendored, new executable file, 223 lines)
@ -0,0 +1,223 @@
|
||||
#if !defined(DUK_ALLOC_POOL_H_INCLUDED)
|
||||
#define DUK_ALLOC_POOL_H_INCLUDED
|
||||
|
||||
#include "duktape.h"
|
||||
|
||||
/* 32-bit (big endian) marker used at the end of pool entries so that wasted
|
||||
* space can be detected. Waste tracking must be enabled explicitly.
|
||||
*/
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_WASTE)
|
||||
#define DUK_ALLOC_POOL_WASTE_MARKER 0xedcb2345UL
|
||||
#endif
|
||||
|
||||
/* Pointer compression with ROM strings/objects:
|
||||
*
|
||||
* For now, use DUK_USE_ROM_OBJECTS to signal the need for compressed ROM
|
||||
* pointers. DUK_USE_ROM_PTRCOMP_FIRST is provided for the ROM pointer
|
||||
* compression range minimum to avoid duplication in user code.
|
||||
*/
|
||||
#if defined(DUK_USE_ROM_OBJECTS) && defined(DUK_USE_HEAPPTR16)
|
||||
#define DUK_ALLOC_POOL_ROMPTR_COMPRESSION
|
||||
#define DUK_ALLOC_POOL_ROMPTR_FIRST DUK_USE_ROM_PTRCOMP_FIRST
|
||||
|
||||
/* This extern declaration is provided by duktape.h, array provided by duktape.c.
|
||||
* Because duk_config.h may include this file (to get the inline functions) we
|
||||
* need to forward declare this also here.
|
||||
*/
|
||||
extern const void * const duk_rom_compressed_pointers[];
|
||||
#endif
|
||||
|
||||
/* Pool configuration for a certain block size. */
|
||||
typedef struct {
|
||||
unsigned int size; /* must be divisible by 4 and >= sizeof(void *) */
|
||||
unsigned int a; /* bytes (not count) to allocate: a*t + b, t is an arbitrary scale parameter */
|
||||
unsigned int b;
|
||||
} duk_pool_config;
|
||||
|
||||
/* Freelist entry, must fit into the smallest block size. */
|
||||
struct duk_pool_free;
|
||||
typedef struct duk_pool_free duk_pool_free;
|
||||
struct duk_pool_free {
|
||||
duk_pool_free *next;
|
||||
};
|
||||
|
||||
/* Pool state for a certain block size. */
|
||||
typedef struct {
|
||||
duk_pool_free *first;
|
||||
char *alloc_end;
|
||||
unsigned int size;
|
||||
unsigned int count;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
unsigned int hwm_used_count;
|
||||
#endif
|
||||
} duk_pool_state;
|
||||
|
||||
/* Statistics for a certain pool. */
|
||||
typedef struct {
|
||||
size_t used_count;
|
||||
size_t used_bytes;
|
||||
size_t free_count;
|
||||
size_t free_bytes;
|
||||
size_t waste_bytes;
|
||||
size_t hwm_used_count;
|
||||
} duk_pool_stats;
|
||||
|
||||
/* Top level state for all pools. Pointer to this struct is used as the allocator
|
||||
* userdata pointer.
|
||||
*/
|
||||
typedef struct {
|
||||
int num_pools;
|
||||
duk_pool_state *states;
|
||||
#if defined(DUK_ALLOC_POOL_TRACK_HIGHWATER)
|
||||
size_t hwm_used_bytes;
|
||||
size_t hwm_waste_bytes;
|
||||
#endif
|
||||
} duk_pool_global;
|
||||
|
||||
/* Statistics for the entire set of pools. */
|
||||
typedef struct {
|
||||
size_t used_bytes;
|
||||
size_t free_bytes;
|
||||
size_t waste_bytes;
|
||||
size_t hwm_used_bytes;
|
||||
size_t hwm_waste_bytes;
|
||||
} duk_pool_global_stats;
|
||||
|
||||
/* Initialize a pool allocator, arguments:
|
||||
* - buffer and size: continuous region to use for pool, must align to 4
|
||||
* - config: configuration for pools in ascending block size
|
||||
* - state: state for pools, matches config order
|
||||
* - num_pools: number of entries in 'config' and 'state'
|
||||
* - global: global state structure
|
||||
*
|
||||
* The 'config', 'state', and 'global' pointers must be valid beyond the init
|
||||
* call, as long as the pool is used.
|
||||
*
|
||||
* Returns a void pointer to be used as userdata for the allocator functions.
|
||||
* Concretely the return value will be "(void *) global", i.e. the global
|
||||
* state struct. If pool init fails, the return value will be NULL.
|
||||
*/
|
||||
void *duk_alloc_pool_init(char *buffer,
|
||||
size_t size,
|
||||
const duk_pool_config *configs,
|
||||
duk_pool_state *states,
|
||||
int num_pools,
|
||||
duk_pool_global *global);
|
||||
|
||||
/* Duktape allocation providers. Typing matches Duktape requirements. */
|
||||
void *duk_alloc_pool(void *udata, duk_size_t size);
|
||||
void *duk_realloc_pool(void *udata, void *ptr, duk_size_t size);
|
||||
void duk_free_pool(void *udata, void *ptr);
|
||||
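/* Editorial usage sketch, not part of the vendored file: one plausible way to
 * wire the pool allocator into a Duktape heap using only the declarations in
 * this header plus duk_create_heap() from duktape.h. The buffer size, block
 * sizes, and a/b weights are illustrative assumptions, not tuned values.
 */
#if 0
#include "duktape.h"
#include "duk_alloc_pool.h"

static char pool_buffer[256 * 1024];      /* continuous region, 4-byte aligned */
static duk_pool_config pool_configs[3] = {
	{ 32,  2, 1024 },                     /* size, a, b; ascending block sizes */
	{ 64,  2, 1024 },
	{ 256, 1, 1024 }
};
static duk_pool_state pool_states[3];
static duk_pool_global pool_global;

duk_context *create_pooled_heap(void) {
	void *udata = duk_alloc_pool_init(pool_buffer, sizeof(pool_buffer),
	                                  pool_configs, pool_states,
	                                  3 /*num_pools*/, &pool_global);
	if (udata == NULL) {
		return NULL;  /* pool init failed */
	}
	/* The pool functions become the heap's allocation providers; 'udata'
	 * (the global pool state) is passed back to them on every call.
	 */
	return duk_create_heap(duk_alloc_pool, duk_realloc_pool, duk_free_pool,
	                       udata, NULL);
}
#endif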
|
||||
/* Stats. */
|
||||
void duk_alloc_pool_get_pool_stats(duk_pool_state *s, duk_pool_stats *res);
|
||||
void duk_alloc_pool_get_global_stats(duk_pool_global *g, duk_pool_global_stats *res);
|
||||
|
||||
/* Duktape pointer compression global state (assumes single pool). */
|
||||
#if defined(DUK_USE_ROM_OBJECTS) && defined(DUK_USE_HEAPPTR16)
|
||||
extern const void *duk_alloc_pool_romptr_low;
|
||||
extern const void *duk_alloc_pool_romptr_high;
|
||||
duk_uint16_t duk_alloc_pool_enc16_rom(void *ptr);
|
||||
#endif
|
||||
#if defined(DUK_USE_HEAPPTR16)
|
||||
extern void *duk_alloc_pool_ptrcomp_base;
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
duk_uint16_t duk_alloc_pool_enc16(void *ptr);
|
||||
void *duk_alloc_pool_dec16(duk_uint16_t val);
|
||||
#endif
|
||||
|
||||
/* Inlined pointer compression functions. Gcc and clang -Os won't in
|
||||
* practice inline these without an "always inline" attribute because it's
|
||||
* more size efficient (by a few kB) to use explicit calls instead. Having
|
||||
* these defined inline here allows performance optimized builds to inline
|
||||
* pointer compression operations.
|
||||
*
|
||||
* Pointer compression assumes there's a single globally registered memory
|
||||
* pool which makes pointer compression more efficient. This would be easy
|
||||
* to fix by adding a userdata pointer to the compression functions and
|
||||
* plumbing the heap userdata from the compression/decompression macros.
|
||||
*/
|
||||
|
||||
/* DUK_ALWAYS_INLINE is not a public API symbol so it may go away in even a
|
||||
* minor update. But it's pragmatic for this extra because it handles many
|
||||
* compilers via duk_config.h detection. Check that the macro exists so that
|
||||
* if it's gone, we can still compile.
|
||||
*/
|
||||
#if defined(DUK_ALWAYS_INLINE)
|
||||
#define DUK__ALLOC_POOL_ALWAYS_INLINE DUK_ALWAYS_INLINE
|
||||
#else
|
||||
#define DUK__ALLOC_POOL_ALWAYS_INLINE /* nop */
|
||||
#endif
|
||||
|
||||
#if defined(DUK_USE_HEAPPTR16)
|
||||
static DUK__ALLOC_POOL_ALWAYS_INLINE duk_uint16_t duk_alloc_pool_enc16(void *ptr) {
|
||||
if (ptr == NULL) {
|
||||
/* With 'return 0' gcc and clang -Os generate inefficient code.
|
||||
* For example, gcc -Os generates:
|
||||
*
|
||||
* 0804911d <duk_alloc_pool_enc16>:
|
||||
* 804911d: 55 push %ebp
|
||||
* 804911e: 85 c0 test %eax,%eax
|
||||
* 8049120: 89 e5 mov %esp,%ebp
|
||||
* 8049122: 74 0b je 804912f <duk_alloc_pool_enc16+0x12>
|
||||
* 8049124: 2b 05 e4 90 07 08 sub 0x80790e4,%eax
|
||||
* 804912a: c1 e8 02 shr $0x2,%eax
|
||||
* 804912d: eb 02 jmp 8049131 <duk_alloc_pool_enc16+0x14>
|
||||
* 804912f: 31 c0 xor %eax,%eax
|
||||
* 8049131: 5d pop %ebp
|
||||
* 8049132: c3 ret
|
||||
*
|
||||
* The NULL path checks %eax for zero; if it is zero, a zero
|
||||
* is unnecessarily loaded into %eax again. The non-zero path
|
||||
* has an unnecessary jump as a side effect of this.
|
||||
*
|
||||
* Using 'return (duk_uint16_t) (intptr_t) ptr;' generates similarly
|
||||
* inefficient code; not sure how to make the result better.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
if (ptr >= duk_alloc_pool_romptr_low && ptr <= duk_alloc_pool_romptr_high) {
|
||||
/* This is complex enough now to need a separate function. */
|
||||
return duk_alloc_pool_enc16_rom(ptr);
|
||||
}
|
||||
#endif
|
||||
return (duk_uint16_t) (((size_t) ((char *) ptr - (char *) duk_alloc_pool_ptrcomp_base)) >> 2);
|
||||
}
|
||||
|
||||
static DUK__ALLOC_POOL_ALWAYS_INLINE void *duk_alloc_pool_dec16(duk_uint16_t val) {
|
||||
if (val == 0) {
|
||||
/* As with enc16 the gcc and clang -Os output is inefficient,
|
||||
* e.g. gcc -Os:
|
||||
*
|
||||
* 08049133 <duk_alloc_pool_dec16>:
|
||||
* 8049133: 55 push %ebp
|
||||
* 8049134: 66 85 c0 test %ax,%ax
|
||||
* 8049137: 89 e5 mov %esp,%ebp
|
||||
* 8049139: 74 0e je 8049149 <duk_alloc_pool_dec16+0x16>
|
||||
* 804913b: 8b 15 e4 90 07 08 mov 0x80790e4,%edx
|
||||
* 8049141: 0f b7 c0 movzwl %ax,%eax
|
||||
* 8049144: 8d 04 82 lea (%edx,%eax,4),%eax
|
||||
* 8049147: eb 02 jmp 804914b <duk_alloc_pool_dec16+0x18>
|
||||
* 8049149: 31 c0 xor %eax,%eax
|
||||
* 804914b: 5d pop %ebp
|
||||
* 804914c: c3 ret
|
||||
*/
|
||||
return NULL;
|
||||
}
|
||||
#if defined(DUK_ALLOC_POOL_ROMPTR_COMPRESSION)
|
||||
if (val >= DUK_ALLOC_POOL_ROMPTR_FIRST) {
|
||||
/* This is a blind lookup, could check index validity.
|
||||
* Duktape should never decompress a pointer which would
|
||||
* be out-of-bounds here.
|
||||
*/
|
||||
return (void *) (intptr_t) (duk_rom_compressed_pointers[val - DUK_ALLOC_POOL_ROMPTR_FIRST]);
|
||||
}
|
||||
#endif
|
||||
return (void *) ((char *) duk_alloc_pool_ptrcomp_base + (((size_t) val) << 2));
|
||||
}
|
||||
#endif
|
||||
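/* Editorial sketch, not part of the vendored file: with DUK_USE_HEAPPTR16 a
 * duk_config.h built for pointer compression would typically route its
 * compression macros to the inline helpers above, roughly as below. The
 * macro names are an assumption here; the authoritative mapping is whatever
 * the generated duk_config.h in this vendor tree actually defines.
 */
#if 0
#define DUK_USE_HEAPPTR_ENC16(udata, ptr)  duk_alloc_pool_enc16((ptr))
#define DUK_USE_HEAPPTR_DEC16(udata, val)  duk_alloc_pool_dec16((val))
#endif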
|
||||
#endif /* DUK_ALLOC_POOL_H_INCLUDED */
|
3672
vendor/gopkg.in/olebedev/go-duktape.v3/duk_config.h
generated
vendored
Executable file
File diff suppressed because it is too large
163
vendor/gopkg.in/olebedev/go-duktape.v3/duk_console.c
generated
vendored
Executable file
@ -0,0 +1,163 @@
|
||||
/*
|
||||
* Minimal 'console' binding.
|
||||
*
|
||||
* https://github.com/DeveloperToolsWG/console-object/blob/master/api.md
|
||||
* https://developers.google.com/web/tools/chrome-devtools/debug/console/console-reference
|
||||
* https://developer.mozilla.org/en/docs/Web/API/console
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include "duktape.h"
|
||||
#include "duk_console.h"
|
||||
|
||||
/* XXX: Add some form of log level filtering. */
|
||||
|
||||
/* XXX: For now logs everything to stdout, V8/Node.js logs debug/info level
|
||||
* to stdout, warn and above to stderr. Should this extra do the same?
|
||||
*/
|
||||
|
||||
/* XXX: Should all output be written via e.g. console.write(formattedMsg)?
|
||||
* This would make it easier for user code to redirect all console output
|
||||
* to a custom backend.
|
||||
*/
|
||||
|
||||
/* XXX: Init console object using duk_def_prop() when that call is available. */
|
||||
|
||||
static duk_ret_t duk__console_log_helper(duk_context *ctx, const char *error_name) {
|
||||
duk_idx_t i, n;
|
||||
duk_uint_t flags;
|
||||
|
||||
flags = (duk_uint_t) duk_get_current_magic(ctx);
|
||||
|
||||
n = duk_get_top(ctx);
|
||||
|
||||
duk_get_global_string(ctx, "console");
|
||||
duk_get_prop_string(ctx, -1, "format");
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
if (duk_check_type_mask(ctx, i, DUK_TYPE_MASK_OBJECT)) {
|
||||
/* Slow path formatting. */
|
||||
duk_dup(ctx, -1); /* console.format */
|
||||
duk_dup(ctx, i);
|
||||
duk_call(ctx, 1);
|
||||
duk_replace(ctx, i); /* arg[i] = console.format(arg[i]); */
|
||||
}
|
||||
}
|
||||
|
||||
duk_pop_2(ctx);
|
||||
|
||||
duk_push_string(ctx, " ");
|
||||
duk_insert(ctx, 0);
|
||||
duk_join(ctx, n);
|
||||
|
||||
if (error_name) {
|
||||
duk_push_error_object(ctx, DUK_ERR_ERROR, "%s", duk_require_string(ctx, -1));
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, error_name);
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_FORCE | DUK_DEFPROP_HAVE_VALUE); /* to get e.g. 'Trace: 1 2 3' */
|
||||
duk_get_prop_string(ctx, -1, "stack");
|
||||
}
|
||||
|
||||
fprintf(stdout, "%s\n", duk_to_string(ctx, -1));
|
||||
if (flags & DUK_CONSOLE_FLUSH) {
|
||||
fflush(stdout);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_assert(duk_context *ctx) {
|
||||
if (duk_to_boolean(ctx, 0)) {
|
||||
return 0;
|
||||
}
|
||||
duk_remove(ctx, 0);
|
||||
|
||||
return duk__console_log_helper(ctx, "AssertionError");
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_log(duk_context *ctx) {
|
||||
return duk__console_log_helper(ctx, NULL);
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_trace(duk_context *ctx) {
|
||||
return duk__console_log_helper(ctx, "Trace");
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_info(duk_context *ctx) {
|
||||
return duk__console_log_helper(ctx, NULL);
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_warn(duk_context *ctx) {
|
||||
return duk__console_log_helper(ctx, NULL);
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_error(duk_context *ctx) {
|
||||
return duk__console_log_helper(ctx, "Error");
|
||||
}
|
||||
|
||||
static duk_ret_t duk__console_dir(duk_context *ctx) {
|
||||
/* For now, just share the formatting of .log() */
|
||||
return duk__console_log_helper(ctx, 0);
|
||||
}
|
||||
|
||||
static void duk__console_reg_vararg_func(duk_context *ctx, duk_c_function func, const char *name, duk_uint_t flags) {
|
||||
duk_push_c_function(ctx, func, DUK_VARARGS);
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, name);
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_FORCE); /* Improve stacktraces by displaying function name */
|
||||
duk_set_magic(ctx, -1, (duk_int_t) flags);
|
||||
duk_put_prop_string(ctx, -2, name);
|
||||
}
|
||||
|
||||
void duk_console_init(duk_context *ctx, duk_uint_t flags) {
|
||||
duk_push_object(ctx);
|
||||
|
||||
/* Custom function to format objects; user can replace.
|
||||
* For now, try JX-formatting and if that fails, fall back
|
||||
* to ToString(v).
|
||||
*/
|
||||
duk_eval_string(ctx,
|
||||
"(function (E) {"
|
||||
"return function format(v){"
|
||||
"try{"
|
||||
"return E('jx',v);"
|
||||
"}catch(e){"
|
||||
"return String(v);" /* String() allows symbols, ToString() internal algorithm doesn't. */
|
||||
"}"
|
||||
"};"
|
||||
"})(Duktape.enc)");
|
||||
duk_put_prop_string(ctx, -2, "format");
|
||||
|
||||
duk__console_reg_vararg_func(ctx, duk__console_assert, "assert", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_log, "log", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_log, "debug", flags); /* alias to console.log */
|
||||
duk__console_reg_vararg_func(ctx, duk__console_trace, "trace", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_info, "info", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_warn, "warn", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_error, "error", flags);
|
||||
duk__console_reg_vararg_func(ctx, duk__console_error, "exception", flags); /* alias to console.error */
|
||||
duk__console_reg_vararg_func(ctx, duk__console_dir, "dir", flags);
|
||||
|
||||
duk_put_global_string(ctx, "console");
|
||||
|
||||
/* Proxy wrapping: ensures any undefined console method calls are
|
||||
* ignored silently. This is required specifically by the
|
||||
* DeveloperToolsWG proposal (and is implemented also by Firefox:
|
||||
* https://bugzilla.mozilla.org/show_bug.cgi?id=629607).
|
||||
*/
|
||||
|
||||
if (flags & DUK_CONSOLE_PROXY_WRAPPER) {
|
||||
/* Tolerate errors: Proxy may be disabled. */
|
||||
duk_peval_string_noresult(ctx,
|
||||
"(function(){"
|
||||
"var D=function(){};"
|
||||
"console=new Proxy(console,{"
|
||||
"get:function(t,k){"
|
||||
"var v=t[k];"
|
||||
"return typeof v==='function'?v:D;"
|
||||
"}"
|
||||
"});"
|
||||
"})();"
|
||||
);
|
||||
}
|
||||
}
|
14
vendor/gopkg.in/olebedev/go-duktape.v3/duk_console.h
generated
vendored
Executable file
@ -0,0 +1,14 @@
|
||||
#if !defined(DUK_CONSOLE_H_INCLUDED)
|
||||
#define DUK_CONSOLE_H_INCLUDED
|
||||
|
||||
#include "duktape.h"
|
||||
|
||||
/* Use a proxy wrapper to make undefined methods (console.foo()) no-ops. */
|
||||
#define DUK_CONSOLE_PROXY_WRAPPER (1 << 0)
|
||||
|
||||
/* Flush output after every call. */
|
||||
#define DUK_CONSOLE_FLUSH (1 << 1)
|
||||
|
||||
extern void duk_console_init(duk_context *ctx, duk_uint_t flags);
|
||||
|
||||
#endif /* DUK_CONSOLE_H_INCLUDED */
|
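/* Editorial usage sketch, not part of the vendored files: registering the
 * console binding on a heap and exercising it from ECMAScript. Only the
 * flags and duk_console_init() declared above plus standard Duktape API
 * calls are used; the heap setup is assumed to exist elsewhere.
 */
#include "duktape.h"
#include "duk_console.h"

static void setup_console(duk_context *ctx) {
	/* Proxy wrapper turns calls to undefined console methods into silent
	 * no-ops; flush keeps stdout ordered relative to other output.
	 */
	duk_console_init(ctx, DUK_CONSOLE_PROXY_WRAPPER | DUK_CONSOLE_FLUSH);
	duk_eval_string_noresult(ctx, "console.log('tracer', { step: 1 });");
}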
380
vendor/gopkg.in/olebedev/go-duktape.v3/duk_logging.c
generated
vendored
Executable file
@ -0,0 +1,380 @@
|
||||
/*
|
||||
* Logging support
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
#include "duktape.h"
|
||||
#include "duk_logging.h"
|
||||
|
||||
/* XXX: uses stderr always for now, configurable? */
|
||||
|
||||
#define DUK_LOGGING_FLUSH /* Duktape 1.x: flush stderr */
|
||||
|
||||
/* 3-letter log level strings. */
|
||||
static const char duk__log_level_strings[] = {
|
||||
'T', 'R', 'C', 'D', 'B', 'G', 'I', 'N', 'F',
|
||||
'W', 'R', 'N', 'E', 'R', 'R', 'F', 'T', 'L'
|
||||
};
|
||||
|
||||
/* Log method names. */
|
||||
static const char *duk__log_method_names[] = {
|
||||
"trace", "debug", "info", "warn", "error", "fatal"
|
||||
};
|
||||
|
||||
/* Constructor. */
|
||||
static duk_ret_t duk__logger_constructor(duk_context *ctx) {
|
||||
duk_idx_t nargs;
|
||||
|
||||
/* Calling as a non-constructor is not meaningful. */
|
||||
if (!duk_is_constructor_call(ctx)) {
|
||||
return DUK_RET_TYPE_ERROR;
|
||||
}
|
||||
|
||||
nargs = duk_get_top(ctx);
|
||||
duk_set_top(ctx, 1);
|
||||
|
||||
duk_push_this(ctx);
|
||||
|
||||
/* [ name this ] */
|
||||
|
||||
if (nargs == 0) {
|
||||
/* Automatic defaulting of logger name from caller. This
|
||||
* would work poorly with tail calls, but constructor calls
|
||||
* are currently never tail calls, so tail calls are not an
|
||||
* issue now.
|
||||
*/
|
||||
|
||||
duk_inspect_callstack_entry(ctx, -2);
|
||||
if (duk_is_object(ctx, -1)) {
|
||||
if (duk_get_prop_string(ctx, -1, "function")) {
|
||||
if (duk_get_prop_string(ctx, -1, "fileName")) {
|
||||
if (duk_is_string(ctx, -1)) {
|
||||
duk_replace(ctx, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Leave values on stack on purpose, ignored below. */
|
||||
|
||||
/* Stripping the filename might be a good idea
|
||||
* ("/foo/bar/quux.js" -> logger name "quux"),
|
||||
* but now used verbatim.
|
||||
*/
|
||||
}
|
||||
/* The stack is unbalanced here on purpose; we only rely on the
|
||||
* initial two values: [ name this ].
|
||||
*/
|
||||
|
||||
if (duk_is_string(ctx, 0)) {
|
||||
duk_dup(ctx, 0);
|
||||
duk_put_prop_string(ctx, 1, "n");
|
||||
} else {
|
||||
/* don't set 'n' at all, inherited value is used as name */
|
||||
}
|
||||
|
||||
duk_compact(ctx, 1);
|
||||
|
||||
return 0; /* keep default instance */
|
||||
}
|
||||
|
||||
/* Default function to format objects. Tries to use toLogString() but falls
|
||||
* back to toString(). Any errors are propagated out without catching.
|
||||
*/
|
||||
static duk_ret_t duk__logger_prototype_fmt(duk_context *ctx) {
|
||||
if (duk_get_prop_string(ctx, 0, "toLogString")) {
|
||||
/* [ arg toLogString ] */
|
||||
|
||||
duk_dup(ctx, 0);
|
||||
duk_call_method(ctx, 0);
|
||||
|
||||
/* [ arg result ] */
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* [ arg undefined ] */
|
||||
duk_pop(ctx);
|
||||
duk_to_string(ctx, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Default function to write a formatted log line. Writes to stderr,
|
||||
* appending a newline to the log line.
|
||||
*
|
||||
* The argument is a buffer; avoid coercing the buffer to a string to
|
||||
* avoid string table traffic.
|
||||
*/
|
||||
static duk_ret_t duk__logger_prototype_raw(duk_context *ctx) {
|
||||
const char *data;
|
||||
duk_size_t data_len;
|
||||
|
||||
data = (const char *) duk_require_buffer(ctx, 0, &data_len);
|
||||
fwrite((const void *) data, 1, data_len, stderr);
|
||||
fputc((int) '\n', stderr);
|
||||
#if defined(DUK_LOGGING_FLUSH)
|
||||
fflush(stderr);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Log frontend shared helper, magic value indicates log level. Provides
|
||||
* frontend functions: trace(), debug(), info(), warn(), error(), fatal().
|
||||
* This needs to have small footprint, reasonable performance, minimal
|
||||
* memory churn, etc.
|
||||
*/
|
||||
static duk_ret_t duk__logger_prototype_log_shared(duk_context *ctx) {
|
||||
duk_double_t now;
|
||||
duk_time_components comp;
|
||||
duk_small_int_t entry_lev;
|
||||
duk_small_int_t logger_lev;
|
||||
duk_int_t nargs;
|
||||
duk_int_t i;
|
||||
duk_size_t tot_len;
|
||||
const duk_uint8_t *arg_str;
|
||||
duk_size_t arg_len;
|
||||
duk_uint8_t *buf, *p;
|
||||
const duk_uint8_t *q;
|
||||
duk_uint8_t date_buf[32]; /* maximum format length is 24+1 (NUL), round up. */
|
||||
duk_size_t date_len;
|
||||
duk_small_int_t rc;
|
||||
|
||||
/* XXX: sanitize to printable (and maybe ASCII) */
|
||||
/* XXX: better multiline */
|
||||
|
||||
/*
|
||||
* Logger arguments are:
|
||||
*
|
||||
* magic: log level (0-5)
|
||||
* this: logger
|
||||
* stack: plain log args
|
||||
*
|
||||
* We want to minimize memory churn so a two-pass approach
|
||||
* is used: first pass formats arguments and computes final
|
||||
* string length, second pass copies strings into a buffer
|
||||
* allocated directly with the correct size. If the backend
|
||||
* function plays nice, it won't coerce the buffer to a string
|
||||
* (and thus intern it).
|
||||
*/
|
||||
|
||||
entry_lev = duk_get_current_magic(ctx);
|
||||
if (entry_lev < DUK_LOG_TRACE || entry_lev > DUK_LOG_FATAL) {
|
||||
/* Should never happen, check just in case. */
|
||||
return 0;
|
||||
}
|
||||
nargs = duk_get_top(ctx);
|
||||
|
||||
/* [ arg1 ... argN this ] */
|
||||
|
||||
/*
|
||||
* Log level check
|
||||
*/
|
||||
|
||||
duk_push_this(ctx);
|
||||
|
||||
duk_get_prop_string(ctx, -1, "l");
|
||||
logger_lev = (duk_small_int_t) duk_get_int(ctx, -1);
|
||||
if (entry_lev < logger_lev) {
|
||||
return 0;
|
||||
}
|
||||
/* log level could be popped but that's not necessary */
|
||||
|
||||
now = duk_get_now(ctx);
|
||||
duk_time_to_components(ctx, now, &comp);
|
||||
sprintf((char *) date_buf, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ",
|
||||
(int) comp.year, (int) comp.month + 1, (int) comp.day,
|
||||
(int) comp.hours, (int) comp.minutes, (int) comp.seconds,
|
||||
(int) comp.milliseconds);
|
||||
|
||||
date_len = strlen((const char *) date_buf);
|
||||
|
||||
duk_get_prop_string(ctx, -2, "n");
|
||||
duk_to_string(ctx, -1);
|
||||
|
||||
/* [ arg1 ... argN this loggerLevel loggerName ] */
|
||||
|
||||
/*
|
||||
* Pass 1
|
||||
*/
|
||||
|
||||
/* Line format: <time> <entryLev> <loggerName>: <msg> */
|
||||
|
||||
tot_len = 0;
|
||||
tot_len += 3 + /* separators: space, space, colon */
|
||||
3 + /* level string */
|
||||
date_len + /* time */
|
||||
duk_get_length(ctx, -1); /* loggerName */
|
||||
|
||||
for (i = 0; i < nargs; i++) {
|
||||
/* When formatting an argument to a string, errors may happen from multiple
|
||||
* causes. In general we want to catch obvious errors like a toLogString()
|
||||
* throwing an error, but we don't currently try to catch every possible
|
||||
* error. In particular, internal errors (like out of memory or stack) are
|
||||
* not caught. Also, we expect Error toString() to not throw an error.
|
||||
*/
|
||||
if (duk_is_object(ctx, i)) {
|
||||
/* duk_pcall_prop() may itself throw an error, but we're content
|
||||
* in catching the obvious errors (like toLogString() throwing an
|
||||
* error).
|
||||
*/
|
||||
duk_push_string(ctx, "fmt");
|
||||
duk_dup(ctx, i);
|
||||
/* [ arg1 ... argN this loggerLevel loggerName 'fmt' arg ] */
|
||||
/* call: this.fmt(arg) */
|
||||
rc = duk_pcall_prop(ctx, -5 /*obj_index*/, 1 /*nargs*/);
|
||||
if (rc) {
|
||||
/* Keep the error as the result (coercing it might fail below,
|
||||
* but we don't catch that now).
|
||||
*/
|
||||
;
|
||||
}
|
||||
duk_replace(ctx, i);
|
||||
}
|
||||
(void) duk_to_lstring(ctx, i, &arg_len);
|
||||
tot_len++; /* sep (even before first one) */
|
||||
tot_len += arg_len;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pass 2
|
||||
*/
|
||||
|
||||
/* XXX: Here it'd be nice if we didn't need to allocate a new fixed
|
||||
* buffer for every write. This would be possible if raw() took a
|
||||
* buffer and a length. We could then use a preallocated buffer for
|
||||
* most log writes and request raw() to write a partial buffer.
|
||||
*/
|
||||
|
||||
buf = (duk_uint8_t *) duk_push_fixed_buffer(ctx, tot_len);
|
||||
p = buf;
|
||||
|
||||
memcpy((void *) p, (const void *) date_buf, (size_t) date_len);
|
||||
p += date_len;
|
||||
*p++ = (duk_uint8_t) ' ';
|
||||
|
||||
q = (const duk_uint8_t *) duk__log_level_strings + (entry_lev * 3);
|
||||
memcpy((void *) p, (const void *) q, (size_t) 3);
|
||||
p += 3;
|
||||
|
||||
*p++ = (duk_uint8_t) ' ';
|
||||
|
||||
arg_str = (const duk_uint8_t *) duk_get_lstring(ctx, -2, &arg_len);
|
||||
memcpy((void *) p, (const void *) arg_str, (size_t) arg_len);
|
||||
p += arg_len;
|
||||
|
||||
*p++ = (duk_uint8_t) ':';
|
||||
|
||||
for (i = 0; i < nargs; i++) {
|
||||
*p++ = (duk_uint8_t) ' ';
|
||||
|
||||
arg_str = (const duk_uint8_t *) duk_get_lstring(ctx, i, &arg_len);
|
||||
memcpy((void *) p, (const void *) arg_str, (size_t) arg_len);
|
||||
p += arg_len;
|
||||
}
|
||||
|
||||
/* [ arg1 ... argN this loggerLevel loggerName buffer ] */
|
||||
|
||||
/* Call this.raw(msg); look up through the instance allows user to override
|
||||
* the raw() function in the instance or in the prototype for maximum
|
||||
* flexibility.
|
||||
*/
|
||||
duk_push_string(ctx, "raw");
|
||||
duk_dup(ctx, -2);
|
||||
/* [ arg1 ... argN this loggerLevel loggerName buffer 'raw' buffer ] */
|
||||
duk_call_prop(ctx, -6, 1); /* this.raw(buffer) */
|
||||
|
||||
return 0;
|
||||
}
|
||||
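/* Editorial worked example of the line format produced above (timestamp and
 * logger name invented for illustration):
 *
 *   2017-11-22T12:34:56.789Z INF tracer: call depth 3 gas 21000
 *
 * i.e. "<ISO-8601 time> <3-letter level> <loggerName>: <space-joined args>".
 */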
|
||||
void duk_log_va(duk_context *ctx, duk_int_t level, const char *fmt, va_list ap) {
|
||||
if (level < 0) {
|
||||
level = 0;
|
||||
} else if (level > (int) (sizeof(duk__log_method_names) / sizeof(const char *)) - 1) {
|
||||
level = (int) (sizeof(duk__log_method_names) / sizeof(const char *)) - 1;
|
||||
}
|
||||
|
||||
duk_push_global_stash(ctx);
|
||||
duk_get_prop_string(ctx, -1, "\xff" "logger:constructor"); /* fixed at init time */
|
||||
duk_get_prop_string(ctx, -1, "clog");
|
||||
duk_get_prop_string(ctx, -1, duk__log_method_names[level]);
|
||||
duk_dup(ctx, -2);
|
||||
duk_push_vsprintf(ctx, fmt, ap);
|
||||
|
||||
/* [ ... stash Logger clog logfunc clog(=this) msg ] */
|
||||
|
||||
duk_call_method(ctx, 1 /*nargs*/);
|
||||
|
||||
/* [ ... stash Logger clog res ] */
|
||||
|
||||
duk_pop_n(ctx, 4);
|
||||
}
|
||||
|
||||
void duk_log(duk_context *ctx, duk_int_t level, const char *fmt, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
duk_log_va(ctx, level, fmt, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
void duk_logging_init(duk_context *ctx, duk_uint_t flags) {
|
||||
/* XXX: Add .name property for logger functions (useful for stack traces if they throw). */
|
||||
|
||||
(void) flags;
|
||||
|
||||
duk_eval_string(ctx,
|
||||
"(function(cons,prot){"
|
||||
"Object.defineProperty(Duktape,'Logger',{value:cons,writable:true,configurable:true});"
|
||||
"Object.defineProperty(cons,'prototype',{value:prot});"
|
||||
"Object.defineProperty(cons,'clog',{value:new Duktape.Logger('C'),writable:true,configurable:true});"
|
||||
"});");
|
||||
|
||||
duk_push_c_function(ctx, duk__logger_constructor, DUK_VARARGS /*nargs*/); /* Duktape.Logger */
|
||||
duk_push_object(ctx); /* Duktape.Logger.prototype */
|
||||
|
||||
/* [ ... func Duktape.Logger Duktape.Logger.prototype ] */
|
||||
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, "Logger");
|
||||
duk_def_prop(ctx, -4, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_FORCE);
|
||||
|
||||
duk_dup_top(ctx);
|
||||
duk_put_prop_string(ctx, -2, "constructor");
|
||||
duk_push_int(ctx, 2);
|
||||
duk_put_prop_string(ctx, -2, "l");
|
||||
duk_push_string(ctx, "anon");
|
||||
duk_put_prop_string(ctx, -2, "n");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_fmt, 1 /*nargs*/);
|
||||
duk_put_prop_string(ctx, -2, "fmt");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_raw, 1 /*nargs*/);
|
||||
duk_put_prop_string(ctx, -2, "raw");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 0); /* magic=0: trace */
|
||||
duk_put_prop_string(ctx, -2, "trace");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 1); /* magic=1: debug */
|
||||
duk_put_prop_string(ctx, -2, "debug");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 2); /* magic=2: info */
|
||||
duk_put_prop_string(ctx, -2, "info");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 3); /* magic=3: warn */
|
||||
duk_put_prop_string(ctx, -2, "warn");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 4); /* magic=4: error */
|
||||
duk_put_prop_string(ctx, -2, "error");
|
||||
duk_push_c_function(ctx, duk__logger_prototype_log_shared, DUK_VARARGS /*nargs*/);
|
||||
duk_set_magic(ctx, -1, 5); /* magic=5: fatal */
|
||||
duk_put_prop_string(ctx, -2, "fatal");
|
||||
|
||||
/* [ ... func Duktape.Logger Duktape.Logger.prototype ] */
|
||||
|
||||
/* XXX: when using ROM built-ins, "Duktape" is read-only by default so
|
||||
* setting Duktape.Logger will now fail.
|
||||
*/
|
||||
|
||||
/* [ ... func Duktape.Logger Duktape.Logger.prototype ] */
|
||||
|
||||
duk_call(ctx, 2);
|
||||
duk_pop(ctx);
|
||||
}
|
20
vendor/gopkg.in/olebedev/go-duktape.v3/duk_logging.h
generated
vendored
Executable file
@ -0,0 +1,20 @@
|
||||
#if !defined(DUK_LOGGING_H_INCLUDED)
|
||||
#define DUK_LOGGING_H_INCLUDED
|
||||
|
||||
#include "duktape.h"
|
||||
|
||||
/* Log levels */
|
||||
#define DUK_LOG_TRACE 0
|
||||
#define DUK_LOG_DEBUG 1
|
||||
#define DUK_LOG_INFO 2
|
||||
#define DUK_LOG_WARN 3
|
||||
#define DUK_LOG_ERROR 4
|
||||
#define DUK_LOG_FATAL 5
|
||||
|
||||
/* No flags at the moment. */
|
||||
|
||||
extern void duk_logging_init(duk_context *ctx, duk_uint_t flags);
|
||||
extern void duk_log_va(duk_context *ctx, duk_int_t level, const char *fmt, va_list ap);
|
||||
extern void duk_log(duk_context *ctx, duk_int_t level, const char *fmt, ...);
|
||||
|
||||
#endif /* DUK_LOGGING_H_INCLUDED */
|
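/* Editorial usage sketch, not part of the vendored files: initializing the
 * logging extra and emitting lines both from C and from ECMAScript, using
 * only the API declared above.
 */
#include "duktape.h"
#include "duk_logging.h"

static void logging_example(duk_context *ctx) {
	duk_logging_init(ctx, 0 /*flags: none defined at the moment*/);

	/* C side: printf-style formatting, routed through Duktape.Logger.clog. */
	duk_log(ctx, DUK_LOG_INFO, "tracer ready, %d scripts loaded", 4);

	/* Script side: the same facility is exposed as Duktape.Logger. */
	duk_eval_string_noresult(ctx,
		"var log = new Duktape.Logger('example');"
		"log.warn('falling back to', 'slow path');");
}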
312
vendor/gopkg.in/olebedev/go-duktape.v3/duk_minimal_printf.c
generated
vendored
Executable file
@ -0,0 +1,312 @@
|
||||
/*
|
||||
* Minimal vsnprintf(), snprintf(), sprintf(), and sscanf() for Duktape.
|
||||
* The supported conversion formats narrowly match what Duktape needs.
|
||||
*/
|
||||
|
||||
#include <stdarg.h> /* va_list etc */
|
||||
#include <stddef.h> /* size_t */
|
||||
#include <stdint.h> /* SIZE_MAX */
|
||||
|
||||
/* Write character with bound checking. Offset 'off' is updated regardless
|
||||
* of whether an actual write is made. This is necessary to satisfy snprintf()
|
||||
* return value semantics.
|
||||
*/
|
||||
#define DUK__WRITE_CHAR(c) do { \
|
||||
if (off < size) { \
|
||||
str[off] = (char) c; \
|
||||
} \
|
||||
off++; \
|
||||
} while (0)
|
||||
|
||||
/* Digits up to radix 16. */
|
||||
static const char duk__format_digits[16] = {
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
|
||||
};
|
||||
|
||||
/* Format an unsigned long with various options. An unsigned long is large
|
||||
* enough for formatting all supported types.
|
||||
*/
|
||||
static size_t duk__format_long(char *str,
|
||||
size_t size,
|
||||
size_t off,
|
||||
int fixed_length,
|
||||
char pad,
|
||||
int radix,
|
||||
int neg_sign,
|
||||
unsigned long v) {
|
||||
char buf[24]; /* 2^64 - 1 = 18446744073709551615, length 20 */
|
||||
char *required;
|
||||
char *p;
|
||||
int i;
|
||||
|
||||
/* Format in reverse order first. Ensure at least one digit is output
|
||||
* to handle '0' correctly. Note that space padding and zero padding
|
||||
* handle negative sign differently:
|
||||
*
|
||||
* %9d and -321 => ' -321'
|
||||
* %09d and -321 => '-00000321'
|
||||
*/
|
||||
|
||||
for (i = 0; i < (int) sizeof(buf); i++) {
|
||||
buf[i] = pad; /* compiles into memset() equivalent, avoid memset() dependency */
|
||||
}
|
||||
|
||||
p = buf;
|
||||
do {
|
||||
*p++ = duk__format_digits[v % radix];
|
||||
v /= radix;
|
||||
} while (v != 0);
|
||||
|
||||
required = buf + fixed_length;
|
||||
if (p < required && pad == (char) '0') {
|
||||
/* Zero padding and we didn't reach maximum length: place
|
||||
* negative sign at the last position. We can't get here
|
||||
* with fixed_length == 0 so that required[-1] is safe.
|
||||
*
|
||||
* Technically we should only do this for 'neg_sign == 1',
|
||||
* but it's OK to advance the pointer even when that's not
|
||||
* the case.
|
||||
*/
|
||||
p = required - 1;
|
||||
}
|
||||
if (neg_sign) {
|
||||
*p++ = (char) '-';
|
||||
}
|
||||
if (p < required) {
|
||||
p = required;
|
||||
}
|
||||
|
||||
/* Now [buf,p[ contains the result in reverse; copy into place. */
|
||||
|
||||
while (p > buf) {
|
||||
p--;
|
||||
DUK__WRITE_CHAR(*p);
|
||||
}
|
||||
|
||||
return off;
|
||||
}
|
||||
|
||||
/* Parse a pointer. Must parse whatever is produced by '%p' in sprintf(). */
|
||||
static int duk__parse_pointer(const char *str, void **out) {
|
||||
const unsigned char *p;
|
||||
unsigned char ch;
|
||||
int count;
|
||||
int limit;
|
||||
long val; /* assume void * fits into long */
|
||||
|
||||
/* We only need to parse what our minimal printf() produces, so that
|
||||
* we can check for a '0x' prefix, and assume all hex digits are
|
||||
* lowercase.
|
||||
*/
|
||||
|
||||
p = (const unsigned char *) str;
|
||||
if (p[0] != (unsigned char) '0' || p[1] != (unsigned char) 'x') {
|
||||
return 0;
|
||||
}
|
||||
p += 2;
|
||||
|
||||
for (val = 0, count = 0, limit = sizeof(void *) * 2; count < limit; count++) {
|
||||
ch = *p++;
|
||||
|
||||
val <<= 4;
|
||||
if (ch >= (unsigned char) '0' && ch <= (unsigned char) '9') {
|
||||
val += ch - (unsigned char) '0';
|
||||
} else if (ch >= (unsigned char) 'a' && ch <= (unsigned char) 'f') {
|
||||
val += ch - (unsigned char) 'a' + 0x0a;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* The input may end at a NUL or garbage may follow. As long as we
|
||||
* parse the '%p' correctly, garbage is allowed to follow, and the
|
||||
* JX pointer parsing also relies on that.
|
||||
*/
|
||||
|
||||
*out = (void *) val;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Minimal vsnprintf() entry point. */
|
||||
int duk_minimal_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
|
||||
size_t off = 0;
|
||||
const char *p;
|
||||
#if 0
|
||||
const char *p_tmp;
|
||||
const char *p_fmt_start;
|
||||
#endif
|
||||
char c;
|
||||
char pad;
|
||||
int fixed_length;
|
||||
int is_long;
|
||||
|
||||
/* Assume str != NULL unless size == 0.
|
||||
* Assume format != NULL.
|
||||
*/
|
||||
|
||||
p = format;
|
||||
for (;;) {
|
||||
c = *p++;
|
||||
if (c == (char) 0) {
|
||||
break;
|
||||
}
|
||||
if (c != (char) '%') {
|
||||
DUK__WRITE_CHAR(c);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Start format sequence. Scan flags and format specifier. */
|
||||
|
||||
#if 0
|
||||
p_fmt_start = p - 1;
|
||||
#endif
|
||||
is_long = 0;
|
||||
pad = ' ';
|
||||
fixed_length = 0;
|
||||
for (;;) {
|
||||
c = *p++;
|
||||
if (c == (char) 'l') {
|
||||
is_long = 1;
|
||||
} else if (c == (char) '0') {
|
||||
/* Only support pad character '0'. */
|
||||
pad = '0';
|
||||
} else if (c >= (char) '1' && c <= (char) '9') {
|
||||
/* Only support fixed lengths 1-9. */
|
||||
fixed_length = (int) (c - (char) '0');
|
||||
} else if (c == (char) 'd') {
|
||||
long v;
|
||||
int neg_sign = 0;
|
||||
if (is_long) {
|
||||
v = va_arg(ap, long);
|
||||
} else {
|
||||
v = (long) va_arg(ap, int);
|
||||
}
|
||||
if (v < 0) {
|
||||
neg_sign = 1;
|
||||
v = -v;
|
||||
}
|
||||
off = duk__format_long(str, size, off, fixed_length, pad, 10, neg_sign, (unsigned long) v);
|
||||
break;
|
||||
} else if (c == (char) 'u') {
|
||||
unsigned long v;
|
||||
if (is_long) {
|
||||
v = va_arg(ap, unsigned long);
|
||||
} else {
|
||||
v = (unsigned long) va_arg(ap, unsigned int);
|
||||
}
|
||||
off = duk__format_long(str, size, off, fixed_length, pad, 10, 0, v);
|
||||
break;
|
||||
} else if (c == (char) 'x') {
|
||||
unsigned long v;
|
||||
if (is_long) {
|
||||
v = va_arg(ap, unsigned long);
|
||||
} else {
|
||||
v = (unsigned long) va_arg(ap, unsigned int);
|
||||
}
|
||||
off = duk__format_long(str, size, off, fixed_length, pad, 16, 0, v);
|
||||
break;
|
||||
} else if (c == (char) 'c') {
|
||||
char v;
|
||||
v = (char) va_arg(ap, int); /* intentionally not 'char' */
|
||||
DUK__WRITE_CHAR(v);
|
||||
break;
|
||||
} else if (c == (char) 's') {
|
||||
const char *v;
|
||||
char c_tmp;
|
||||
v = va_arg(ap, const char *);
|
||||
if (v) {
|
||||
for (;;) {
|
||||
c_tmp = *v++;
|
||||
if (c_tmp) {
|
||||
DUK__WRITE_CHAR(c_tmp);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
} else if (c == (char) 'p') {
|
||||
/* Assume a void * can be represented by 'long'. This is not
|
||||
* always the case. NULL pointer is printed out as 0x0000...
|
||||
*/
|
||||
void *v;
|
||||
v = va_arg(ap, void *);
|
||||
DUK__WRITE_CHAR('0');
|
||||
DUK__WRITE_CHAR('x');
|
||||
off = duk__format_long(str, size, off, sizeof(void *) * 2, '0', 16, 0, (unsigned long) v);
|
||||
break;
|
||||
} else {
|
||||
/* Unrecognized, bail out early. We could also emit the format
|
||||
* specifier verbatim, but it'd be a waste of footprint because
|
||||
* this case should never happen in practice.
|
||||
*/
|
||||
#if 0
|
||||
DUK__WRITE_CHAR('!');
|
||||
#endif
|
||||
#if 0
|
||||
for (p_tmp = p_fmt_start; p_tmp != p; p_tmp++) {
|
||||
DUK__WRITE_CHAR(*p_tmp);
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
goto finish;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
finish:
|
||||
if (off < size) {
|
||||
str[off] = (char) 0; /* No increment for 'off', not counted in return value. */
|
||||
} else if (size > 0) {
|
||||
/* Forced termination. */
|
||||
str[size - 1] = 0;
|
||||
}
|
||||
|
||||
return (int) off;
|
||||
}
|
||||
|
||||
/* Minimal snprintf() entry point. */
|
||||
int duk_minimal_snprintf(char *str, size_t size, const char *format, ...) {
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, format);
|
||||
ret = duk_minimal_vsnprintf(str, size, format, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Minimal sprintf() entry point. */
|
||||
int duk_minimal_sprintf(char *str, const char *format, ...) {
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, format);
|
||||
ret = duk_minimal_vsnprintf(str, SIZE_MAX, format, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Minimal sscanf() entry point. */
|
||||
int duk_minimal_sscanf(const char *str, const char *format, ...) {
|
||||
va_list ap;
|
||||
int ret;
|
||||
void **out;
|
||||
|
||||
/* Only the exact "%p" format is supported. */
|
||||
if (format[0] != (char) '%' ||
|
||||
format[1] != (char) 'p' ||
|
||||
format[2] != (char) 0) {
|
||||
return 0;
}
|
||||
|
||||
va_start(ap, format);
|
||||
out = va_arg(ap, void **);
|
||||
ret = duk__parse_pointer(str, out);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#undef DUK__WRITE_CHAR
|
12
vendor/gopkg.in/olebedev/go-duktape.v3/duk_minimal_printf.h
generated
vendored
Executable file
@ -0,0 +1,12 @@
|
||||
#if !defined(DUK_MINIMAL_PRINTF_H_INCLUDED)
|
||||
#define DUK_MINIMAL_PRINTF_H_INCLUDED
|
||||
|
||||
#include <stdarg.h> /* va_list etc */
|
||||
#include <stddef.h> /* size_t */
|
||||
|
||||
extern int duk_minimal_sprintf(char *str, const char *format, ...);
|
||||
extern int duk_minimal_snprintf(char *str, size_t size, const char *format, ...);
|
||||
extern int duk_minimal_vsnprintf(char *str, size_t size, const char *format, va_list ap);
|
||||
extern int duk_minimal_sscanf(const char *str, const char *format, ...);
|
||||
|
||||
#endif /* DUK_MINIMAL_PRINTF_H_INCLUDED */
|
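/* Editorial usage sketch, not part of the vendored files: the return value
 * follows snprintf() semantics (the length that would have been written,
 * which may exceed the buffer on truncation), and '%p' output round-trips
 * through the minimal sscanf().
 */
#include "duk_minimal_printf.h"

static void minimal_printf_example(void) {
	char buf[64];
	void *in = (void *) buf;
	void *out = NULL;
	int n;

	/* Supported conversions are narrow: %d, %u, %x, %c, %s, %p with an
	 * optional 'l', '0' padding and single-digit field widths.
	 */
	n = duk_minimal_snprintf(buf, sizeof(buf), "pc=%ld op=0x%02x", 123L, 0x1bU);
	(void) n;

	duk_minimal_sprintf(buf, "%p", in);
	(void) duk_minimal_sscanf(buf, "%p", &out);  /* out == in on success */
}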
471
vendor/gopkg.in/olebedev/go-duktape.v3/duk_module_duktape.c
generated
vendored
Executable file
@ -0,0 +1,471 @@
|
||||
/*
|
||||
* Duktape 1.x compatible module loading framework
|
||||
*/
|
||||
|
||||
#include "duktape.h"
|
||||
#include "duk_module_duktape.h"
|
||||
|
||||
/* (v)snprintf() is missing before MSVC 2015. Note that _(v)snprintf() does
|
||||
* NOT NUL terminate on truncation, but that's OK here.
|
||||
* http://stackoverflow.com/questions/2915672/snprintf-and-visual-studio-2010
|
||||
*/
|
||||
#if defined(_MSC_VER) && (_MSC_VER < 1900)
|
||||
#define snprintf _snprintf
|
||||
#endif
|
||||
|
||||
#if 0 /* Enable manually */
|
||||
#define DUK__ASSERT(x) do { \
|
||||
if (!(x)) { \
|
||||
fprintf(stderr, "ASSERTION FAILED at %s:%d: " #x "\n", __FILE__, __LINE__); \
|
||||
fflush(stderr); \
|
||||
} \
|
||||
} while (0)
|
||||
#define DUK__ASSERT_TOP(ctx,val) do { \
|
||||
DUK__ASSERT(duk_get_top((ctx)) == (val)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define DUK__ASSERT(x) do { (void) (x); } while (0)
|
||||
#define DUK__ASSERT_TOP(ctx,val) do { (void) ctx; (void) (val); } while (0)
|
||||
#endif
|
||||
|
||||
static void duk__resolve_module_id(duk_context *ctx, const char *req_id, const char *mod_id) {
|
||||
duk_uint8_t buf[DUK_COMMONJS_MODULE_ID_LIMIT];
|
||||
duk_uint8_t *p;
|
||||
duk_uint8_t *q;
|
||||
duk_uint8_t *q_last; /* last component */
|
||||
duk_int_t int_rc;
|
||||
|
||||
DUK__ASSERT(req_id != NULL);
|
||||
/* mod_id may be NULL */
|
||||
|
||||
/*
|
||||
* A few notes on the algorithm:
|
||||
*
|
||||
* - Terms are not allowed to begin with a period unless the term
|
||||
* is either '.' or '..'. This simplifies implementation (and
|
||||
* is within CommonJS modules specification).
|
||||
*
|
||||
* - There are few output bound checks here. This is on purpose:
|
||||
* the resolution input is length checked and the output is never
|
||||
* longer than the input. The resolved output is written directly
|
||||
* over the input because it's never longer than the input at any
|
||||
* point in the algorithm.
|
||||
*
|
||||
* - Non-ASCII characters are processed as individual bytes and
|
||||
* need no special treatment. However, U+0000 terminates the
|
||||
* algorithm; this is not an issue because U+0000 is not a
|
||||
* desirable term character anyway.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set up the resolution input which is the requested ID directly
|
||||
* (if absolute or no current module path) or with current module
|
||||
* ID prepended (if relative and current module path exists).
|
||||
*
|
||||
* Suppose current module is 'foo/bar' and relative path is './quux'.
|
||||
* The 'bar' component must be replaced so the initial input here is
|
||||
* 'foo/bar/.././quux'.
|
||||
*/
|
||||
|
||||
if (mod_id != NULL && req_id[0] == '.') {
|
||||
int_rc = snprintf((char *) buf, sizeof(buf), "%s/../%s", mod_id, req_id);
|
||||
} else {
|
||||
int_rc = snprintf((char *) buf, sizeof(buf), "%s", req_id);
|
||||
}
|
||||
if (int_rc >= (duk_int_t) sizeof(buf) || int_rc < 0) {
|
||||
/* Potentially truncated, NUL not guaranteed in any case.
|
||||
* The (int_rc < 0) case should not occur in practice.
|
||||
*/
|
||||
goto resolve_error;
|
||||
}
|
||||
DUK__ASSERT(strlen((const char *) buf) < sizeof(buf)); /* at most sizeof(buf) - 1 */
|
||||
|
||||
/*
|
||||
* Resolution loop. At the top of the loop we're expecting a valid
|
||||
* term: '.', '..', or a non-empty identifier not starting with a period.
|
||||
*/
|
||||
|
||||
p = buf;
|
||||
q = buf;
|
||||
for (;;) {
|
||||
duk_uint_fast8_t c;
|
||||
|
||||
/* Here 'p' always points to the start of a term.
|
||||
*
|
||||
* We can also unconditionally reset q_last here: if this is
|
||||
* the last (non-empty) term q_last will have the right value
|
||||
* on loop exit.
|
||||
*/
|
||||
|
||||
DUK__ASSERT(p >= q); /* output is never longer than input during resolution */
|
||||
|
||||
q_last = q;
|
||||
|
||||
c = *p++;
|
||||
if (c == 0) {
|
||||
goto resolve_error;
|
||||
} else if (c == '.') {
|
||||
c = *p++;
|
||||
if (c == '/') {
|
||||
/* Term was '.' and is eaten entirely (including dup slashes). */
|
||||
goto eat_dup_slashes;
|
||||
}
|
||||
if (c == '.' && *p == '/') {
|
||||
/* Term was '..', backtrack resolved name by one component.
|
||||
* q[-1] = previous slash (or beyond start of buffer)
|
||||
* q[-2] = last char of previous component (or beyond start of buffer)
|
||||
*/
|
||||
p++; /* eat (first) input slash */
|
||||
DUK__ASSERT(q >= buf);
|
||||
if (q == buf) {
|
||||
goto resolve_error;
|
||||
}
|
||||
DUK__ASSERT(*(q - 1) == '/');
|
||||
q--; /* Backtrack to last output slash (dups already eliminated). */
|
||||
for (;;) {
|
||||
/* Backtrack to previous slash or start of buffer. */
|
||||
DUK__ASSERT(q >= buf);
|
||||
if (q == buf) {
|
||||
break;
|
||||
}
|
||||
if (*(q - 1) == '/') {
|
||||
break;
|
||||
}
|
||||
q--;
|
||||
}
|
||||
goto eat_dup_slashes;
|
||||
}
|
||||
goto resolve_error;
|
||||
} else if (c == '/') {
|
||||
/* e.g. require('/foo'), empty terms not allowed */
|
||||
goto resolve_error;
|
||||
} else {
|
||||
for (;;) {
|
||||
/* Copy term name until end or '/'. */
|
||||
*q++ = c;
|
||||
c = *p++;
|
||||
if (c == 0) {
|
||||
/* This was the last term, and q_last was
|
||||
* updated to match this term at loop top.
|
||||
*/
|
||||
goto loop_done;
|
||||
} else if (c == '/') {
|
||||
*q++ = '/';
|
||||
break;
|
||||
} else {
|
||||
/* write on next loop */
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eat_dup_slashes:
|
||||
for (;;) {
|
||||
/* eat dup slashes */
|
||||
c = *p;
|
||||
if (c != '/') {
|
||||
break;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
}
|
||||
loop_done:
|
||||
/* Output #1: resolved absolute name. */
|
||||
DUK__ASSERT(q >= buf);
|
||||
duk_push_lstring(ctx, (const char *) buf, (size_t) (q - buf));
|
||||
|
||||
/* Output #2: last component name. */
|
||||
DUK__ASSERT(q >= q_last);
|
||||
DUK__ASSERT(q_last >= buf);
|
||||
duk_push_lstring(ctx, (const char *) q_last, (size_t) (q - q_last));
|
||||
return;
|
||||
|
||||
resolve_error:
|
||||
(void) duk_type_error(ctx, "cannot resolve module id: %s", (const char *) req_id);
|
||||
}
|
||||
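/* Editorial worked examples of the resolution above (derived by tracing the
 * algorithm, not part of the vendored file):
 *
 *   current module 'foo/bar', require('./quux')  -> input 'foo/bar/.././quux' -> 'foo/quux'
 *   current module 'foo/bar', require('../baz')  -> input 'foo/bar/../../baz' -> 'baz'
 *   no current module,        require('util/enc')                             -> 'util/enc'
 *   require('/abs'), or '..' backtracking past the root                       -> resolve error
 */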
|
||||
/* Stack indices for better readability. */
|
||||
#define DUK__IDX_REQUESTED_ID 0 /* module id requested */
|
||||
#define DUK__IDX_REQUIRE 1 /* current require() function */
|
||||
#define DUK__IDX_REQUIRE_ID 2 /* the base ID of the current require() function, resolution base */
|
||||
#define DUK__IDX_RESOLVED_ID 3 /* resolved, normalized absolute module ID */
|
||||
#define DUK__IDX_LASTCOMP 4 /* last component name in resolved path */
|
||||
#define DUK__IDX_DUKTAPE 5 /* Duktape object */
|
||||
#define DUK__IDX_MODLOADED 6 /* Duktape.modLoaded[] module cache */
|
||||
#define DUK__IDX_UNDEFINED 7 /* 'undefined', artifact of lookup */
|
||||
#define DUK__IDX_FRESH_REQUIRE 8 /* new require() function for module, updated resolution base */
|
||||
#define DUK__IDX_EXPORTS 9 /* default exports table */
|
||||
#define DUK__IDX_MODULE 10 /* module object containing module.exports, etc */
|
||||
|
||||
static duk_ret_t duk__require(duk_context *ctx) {
|
||||
const char *str_req_id; /* requested identifier */
|
||||
const char *str_mod_id; /* require.id of current module */
|
||||
duk_int_t pcall_rc;
|
||||
|
||||
/* NOTE: we try to minimize code size by avoiding unnecessary pops,
|
||||
* so the stack looks a bit cluttered in this function. DUK__ASSERT_TOP()
|
||||
* assertions are used to ensure stack configuration is correct at each
|
||||
* step.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Resolve module identifier into canonical absolute form.
|
||||
*/
|
||||
|
||||
str_req_id = duk_require_string(ctx, DUK__IDX_REQUESTED_ID);
|
||||
duk_push_current_function(ctx);
|
||||
duk_get_prop_string(ctx, -1, "id");
|
||||
str_mod_id = duk_get_string(ctx, DUK__IDX_REQUIRE_ID); /* ignore non-strings */
|
||||
duk__resolve_module_id(ctx, str_req_id, str_mod_id);
|
||||
str_req_id = NULL;
|
||||
str_mod_id = NULL;
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp ] */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_LASTCOMP + 1);
|
||||
|
||||
/*
|
||||
* Cached module check.
|
||||
*
|
||||
* If module has been loaded or its loading has already begun without
|
||||
* finishing, return the same cached value (module.exports). The
|
||||
* value is registered when module load starts so that circular
|
||||
* references can be supported to some extent.
|
||||
*/
|
||||
|
||||
duk_push_global_stash(ctx);
|
||||
duk_get_prop_string(ctx, -1, "\xff" "module:Duktape");
|
||||
duk_remove(ctx, -2); /* Lookup stashed, original 'Duktape' object. */
|
||||
duk_get_prop_string(ctx, DUK__IDX_DUKTAPE, "modLoaded"); /* Duktape.modLoaded */
|
||||
duk_require_type_mask(ctx, DUK__IDX_MODLOADED, DUK_TYPE_MASK_OBJECT);
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODLOADED + 1);
|
||||
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
if (duk_get_prop(ctx, DUK__IDX_MODLOADED)) {
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded Duktape.modLoaded[id] ] */
|
||||
duk_get_prop_string(ctx, -1, "exports"); /* return module.exports */
|
||||
return 1;
|
||||
}
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_UNDEFINED + 1);
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded undefined ] */
|
||||
|
||||
/*
|
||||
* Module not loaded (and loading not started previously).
|
||||
*
|
||||
* Create a new require() function with 'id' set to resolved ID
|
||||
* of module being loaded. Also create 'exports' and 'module'
|
||||
* tables but don't register exports to the loaded table yet.
|
||||
* We don't want to do that unless the user module search callbacks
|
||||
* succeeds in finding the module.
|
||||
*/
|
||||
|
||||
/* Fresh require: require.id is left configurable (but not writable)
|
||||
* so that it is not easy to accidentally tweak it, but it can still be
|
||||
* done with Object.defineProperty().
|
||||
*
|
||||
* XXX: require.id could also be just made non-configurable, as there
|
||||
* is no practical reason to touch it (at least from Ecmascript code).
|
||||
*/
|
||||
duk_push_c_function(ctx, duk__require, 1 /*nargs*/);
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, "require");
|
||||
duk_def_prop(ctx, DUK__IDX_FRESH_REQUIRE, DUK_DEFPROP_HAVE_VALUE); /* not writable, not enumerable, not configurable */
|
||||
duk_push_string(ctx, "id");
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
duk_def_prop(ctx, DUK__IDX_FRESH_REQUIRE, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_SET_CONFIGURABLE); /* a fresh require() with require.id = resolved target module id */
|
||||
|
||||
/* Module table:
|
||||
* - module.exports: initial exports table (may be replaced by user)
|
||||
* - module.id is non-writable and non-configurable, as the CommonJS
|
||||
* spec suggests this if possible
|
||||
* - module.filename: not set, defaults to resolved ID if not explicitly
|
||||
* set by modSearch() (note capitalization, not .fileName, matches Node.js)
|
||||
* - module.name: not set, defaults to last component of resolved ID if
|
||||
* not explicitly set by modSearch()
|
||||
*/
|
||||
duk_push_object(ctx); /* exports */
|
||||
duk_push_object(ctx); /* module */
|
||||
duk_push_string(ctx, "exports");
|
||||
duk_dup(ctx, DUK__IDX_EXPORTS);
|
||||
duk_def_prop(ctx, DUK__IDX_MODULE, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_SET_WRITABLE | DUK_DEFPROP_SET_CONFIGURABLE); /* module.exports = exports */
|
||||
duk_push_string(ctx, "id");
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID); /* resolved id: require(id) must return this same module */
|
||||
duk_def_prop(ctx, DUK__IDX_MODULE, DUK_DEFPROP_HAVE_VALUE); /* module.id = resolved_id; not writable, not enumerable, not configurable */
|
||||
duk_compact(ctx, DUK__IDX_MODULE); /* module table remains registered to modLoaded, minimize its size */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODULE + 1);
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded undefined fresh_require exports module ] */
|
||||
|
||||
/* Register the module table early to modLoaded[] so that we can
|
||||
* support circular references even in modSearch(). If an error
|
||||
* is thrown, we'll delete the reference.
|
||||
*/
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
duk_dup(ctx, DUK__IDX_MODULE);
|
||||
duk_put_prop(ctx, DUK__IDX_MODLOADED); /* Duktape.modLoaded[resolved_id] = module */
|
||||
|
||||
/*
|
||||
* Call user provided module search function and build the wrapped
|
||||
* module source code (if necessary). The module search function
|
||||
* can be used to implement pure Ecmascript, pure C, and mixed
|
||||
* Ecmascript/C modules.
|
||||
*
|
||||
* The module search function can operate on the exports table directly
|
||||
* (e.g. DLL code can register values to it). It can also return a
|
||||
* string which is interpreted as module source code (if a non-string
|
||||
* is returned the module is assumed to be a pure C one). If a module
|
||||
* cannot be found, an error must be thrown by the user callback.
|
||||
*
|
||||
* Because Duktape.modLoaded[] already contains the module being
|
||||
* loaded, circular references for C modules should also work
|
||||
* (although expected to be quite rare).
|
||||
*/
|
||||
|
||||
duk_push_string(ctx, "(function(require,exports,module){");
|
||||
|
||||
/* Duktape.modSearch(resolved_id, fresh_require, exports, module). */
|
||||
duk_get_prop_string(ctx, DUK__IDX_DUKTAPE, "modSearch"); /* Duktape.modSearch */
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
duk_dup(ctx, DUK__IDX_FRESH_REQUIRE);
|
||||
duk_dup(ctx, DUK__IDX_EXPORTS);
|
||||
duk_dup(ctx, DUK__IDX_MODULE); /* [ ... Duktape.modSearch resolved_id last_comp fresh_require exports module ] */
|
||||
pcall_rc = duk_pcall(ctx, 4 /*nargs*/); /* -> [ ... source ] */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODULE + 3);
|
||||
|
||||
if (pcall_rc != DUK_EXEC_SUCCESS) {
|
||||
/* Delete entry in Duktape.modLoaded[] and rethrow. */
|
||||
goto delete_rethrow;
|
||||
}
|
||||
|
||||
/* If user callback did not return source code, module loading
|
||||
* is finished (user callback initialized exports table directly).
|
||||
*/
|
||||
if (!duk_is_string(ctx, -1)) {
|
||||
/* User callback did not return source code, so module loading
|
||||
* is finished: just update modLoaded with final module.exports
|
||||
* and we're done.
|
||||
*/
|
||||
goto return_exports;
|
||||
}
|
||||
|
||||
/* Finish the wrapped module source. Force module.filename as the
|
||||
* function .fileName so it gets set for functions defined within a
|
||||
* module. This also ensures loggers created within the module get
|
||||
* the module ID (or overridden filename) as their default logger name.
|
||||
* (Note capitalization: .filename matches Node.js while .fileName is
|
||||
* used elsewhere in Duktape.)
|
||||
*/
|
||||
duk_push_string(ctx, "\n})"); /* Newline allows module last line to contain a // comment. */
|
||||
duk_concat(ctx, 3);
|
||||
if (!duk_get_prop_string(ctx, DUK__IDX_MODULE, "filename")) {
|
||||
/* module.filename for .fileName, default to resolved ID if
|
||||
* not present.
|
||||
*/
|
||||
duk_pop(ctx);
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
}
|
||||
pcall_rc = duk_pcompile(ctx, DUK_COMPILE_EVAL);
|
||||
if (pcall_rc != DUK_EXEC_SUCCESS) {
|
||||
goto delete_rethrow;
|
||||
}
|
||||
pcall_rc = duk_pcall(ctx, 0); /* -> eval'd function wrapper (not called yet) */
|
||||
if (pcall_rc != DUK_EXEC_SUCCESS) {
|
||||
goto delete_rethrow;
|
||||
}
|
||||
|
||||
/* Module has now evaluated to a wrapped module function. Force its
|
||||
* .name to match module.name (defaults to last component of resolved
|
||||
* ID) so that it is shown in stack traces too. Note that we must not
|
||||
* introduce an actual name binding into the function scope (which is
|
||||
* usually the case with a named function) because it would affect the
|
||||
* scope seen by the module and shadow accesses to globals of the same name.
|
||||
* This is now done by compiling the function as anonymous and then forcing
|
||||
* its .name without setting a "has name binding" flag.
|
||||
*/
|
||||
|
||||
duk_push_string(ctx, "name");
|
||||
if (!duk_get_prop_string(ctx, DUK__IDX_MODULE, "name")) {
|
||||
/* module.name for .name, default to last component if
|
||||
* not present.
|
||||
*/
|
||||
duk_pop(ctx);
|
||||
duk_dup(ctx, DUK__IDX_LASTCOMP);
|
||||
}
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_FORCE);
|
||||
|
||||
/*
|
||||
* Call the wrapped module function.
|
||||
*
|
||||
* Use a protected call so that we can update Duktape.modLoaded[resolved_id]
|
||||
* even if the module throws an error.
|
||||
*/
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded undefined fresh_require exports module mod_func ] */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODULE + 2);
|
||||
|
||||
duk_dup(ctx, DUK__IDX_EXPORTS); /* exports (this binding) */
|
||||
duk_dup(ctx, DUK__IDX_FRESH_REQUIRE); /* fresh require (argument) */
|
||||
duk_get_prop_string(ctx, DUK__IDX_MODULE, "exports"); /* relookup exports from module.exports in case it was changed by modSearch */
|
||||
duk_dup(ctx, DUK__IDX_MODULE); /* module (argument) */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODULE + 6);
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded undefined fresh_require exports module mod_func exports fresh_require exports module ] */
|
||||
|
||||
pcall_rc = duk_pcall_method(ctx, 3 /*nargs*/);
|
||||
if (pcall_rc != DUK_EXEC_SUCCESS) {
|
||||
/* Module loading failed. Node.js will forget the module
|
||||
* registration so that another require() will try to load
|
||||
* the module again. Mimic that behavior.
|
||||
*/
|
||||
goto delete_rethrow;
|
||||
}
|
||||
|
||||
/* [ requested_id require require.id resolved_id last_comp Duktape Duktape.modLoaded undefined fresh_require exports module result(ignored) ] */
|
||||
DUK__ASSERT_TOP(ctx, DUK__IDX_MODULE + 2);
|
||||
|
||||
/* fall through */
|
||||
|
||||
return_exports:
|
||||
duk_get_prop_string(ctx, DUK__IDX_MODULE, "exports");
|
||||
duk_compact(ctx, -1); /* compact the exports table */
|
||||
return 1; /* return module.exports */
|
||||
|
||||
delete_rethrow:
|
||||
duk_dup(ctx, DUK__IDX_RESOLVED_ID);
|
||||
duk_del_prop(ctx, DUK__IDX_MODLOADED); /* delete Duktape.modLoaded[resolved_id] */
|
||||
(void) duk_throw(ctx); /* rethrow original error */
|
||||
return 0; /* not reachable */
|
||||
}
|
||||
|
||||
void duk_module_duktape_init(duk_context *ctx) {
|
||||
/* Stash 'Duktape' in case it's modified. */
|
||||
duk_push_global_stash(ctx);
|
||||
duk_get_global_string(ctx, "Duktape");
|
||||
duk_put_prop_string(ctx, -2, "\xff" "module:Duktape");
|
||||
duk_pop(ctx);
|
||||
|
||||
/* Register `require` as a global function. */
|
||||
duk_eval_string(ctx,
|
||||
"(function(req){"
|
||||
"var D=Object.defineProperty;"
|
||||
"D(req,'name',{value:'require'});"
|
||||
"D(this,'require',{value:req,writable:true,configurable:true});"
|
||||
"D(Duktape,'modLoaded',{value:Object.create(null),writable:true,configurable:true});"
|
||||
"})");
|
||||
duk_push_c_function(ctx, duk__require, 1 /*nargs*/);
|
||||
duk_call(ctx, 1);
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
#undef DUK__ASSERT
|
||||
#undef DUK__ASSERT_TOP
|
||||
#undef DUK__IDX_REQUESTED_ID
|
||||
#undef DUK__IDX_REQUIRE
|
||||
#undef DUK__IDX_REQUIRE_ID
|
||||
#undef DUK__IDX_RESOLVED_ID
|
||||
#undef DUK__IDX_LASTCOMP
|
||||
#undef DUK__IDX_DUKTAPE
|
||||
#undef DUK__IDX_MODLOADED
|
||||
#undef DUK__IDX_UNDEFINED
|
||||
#undef DUK__IDX_FRESH_REQUIRE
|
||||
#undef DUK__IDX_EXPORTS
|
||||
#undef DUK__IDX_MODULE
|
14 vendor/gopkg.in/olebedev/go-duktape.v3/duk_module_duktape.h (generated, vendored, executable file)
@ -0,0 +1,14 @@
#if !defined(DUK_MODULE_DUKTAPE_H_INCLUDED)
#define DUK_MODULE_DUKTAPE_H_INCLUDED

#include "duktape.h"

/* Maximum length of CommonJS module identifier to resolve. Length includes
 * both current module ID, requested (possibly relative) module ID, and a
 * slash in between.
 */
#define DUK_COMMONJS_MODULE_ID_LIMIT 256

extern void duk_module_duktape_init(duk_context *ctx);

#endif  /* DUK_MODULE_DUKTAPE_H_INCLUDED */
333 vendor/gopkg.in/olebedev/go-duktape.v3/duk_module_node.c (generated, vendored, executable file)
@ -0,0 +1,333 @@
|
||||
/*
|
||||
* Node.js-like module loading framework for Duktape
|
||||
*
|
||||
* https://nodejs.org/api/modules.html
|
||||
*/
|
||||
|
||||
#include "duktape.h"
|
||||
#include "duk_module_node.h"
|
||||
|
||||
#if DUK_VERSION >= 19999
|
||||
static duk_int_t duk__eval_module_source(duk_context *ctx, void *udata);
|
||||
#else
|
||||
static duk_int_t duk__eval_module_source(duk_context *ctx);
|
||||
#endif
|
||||
static void duk__push_module_object(duk_context *ctx, const char *id, duk_bool_t main);
|
||||
|
||||
static duk_bool_t duk__get_cached_module(duk_context *ctx, const char *id) {
|
||||
duk_push_global_stash(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "requireCache");
|
||||
if (duk_get_prop_string(ctx, -1, id)) {
|
||||
duk_remove(ctx, -2);
|
||||
duk_remove(ctx, -2);
|
||||
return 1;
|
||||
} else {
|
||||
duk_pop_3(ctx);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Place a `module` object on the top of the value stack into the require cache
|
||||
* based on its `.id` property. As a convenience to the caller, leave the
|
||||
* object on top of the value stack afterwards.
|
||||
*/
|
||||
static void duk__put_cached_module(duk_context *ctx) {
|
||||
/* [ ... module ] */
|
||||
|
||||
duk_push_global_stash(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "requireCache");
|
||||
duk_dup(ctx, -3);
|
||||
|
||||
/* [ ... module stash req_cache module ] */
|
||||
|
||||
(void) duk_get_prop_string(ctx, -1, "id");
|
||||
duk_dup(ctx, -2);
|
||||
duk_put_prop(ctx, -4);
|
||||
|
||||
duk_pop_3(ctx); /* [ ... module ] */
|
||||
}
|
||||
|
||||
static void duk__del_cached_module(duk_context *ctx, const char *id) {
|
||||
duk_push_global_stash(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "requireCache");
|
||||
duk_del_prop_string(ctx, -1, id);
|
||||
duk_pop_2(ctx);
|
||||
}
|
||||
|
||||
static duk_ret_t duk__handle_require(duk_context *ctx) {
|
||||
/*
|
||||
* Value stack handling here is a bit sloppy but should be correct.
|
||||
* Call handling will clean up any extra garbage for us.
|
||||
*/
|
||||
|
||||
const char *id;
|
||||
const char *parent_id;
|
||||
duk_idx_t module_idx;
|
||||
duk_idx_t stash_idx;
|
||||
duk_int_t ret;
|
||||
|
||||
duk_push_global_stash(ctx);
|
||||
stash_idx = duk_normalize_index(ctx, -1);
|
||||
|
||||
duk_push_current_function(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "moduleId");
|
||||
parent_id = duk_require_string(ctx, -1);
|
||||
(void) parent_id; /* not used directly; suppress warning */
|
||||
|
||||
/* [ id stash require parent_id ] */
|
||||
|
||||
id = duk_require_string(ctx, 0);
|
||||
|
||||
(void) duk_get_prop_string(ctx, stash_idx, "\xff" "modResolve");
|
||||
duk_dup(ctx, 0); /* module ID */
|
||||
duk_dup(ctx, -3); /* parent ID */
|
||||
duk_call(ctx, 2);
|
||||
|
||||
/* [ ... stash ... resolved_id ] */
|
||||
|
||||
id = duk_require_string(ctx, -1);
|
||||
|
||||
if (duk__get_cached_module(ctx, id)) {
|
||||
goto have_module; /* use the cached module */
|
||||
}
|
||||
|
||||
duk__push_module_object(ctx, id, 0 /*main*/);
|
||||
duk__put_cached_module(ctx); /* module remains on stack */
|
||||
|
||||
/*
|
||||
* From here on out, we have to be careful not to throw. If it can't be
|
||||
* avoided, the error must be caught and the module removed from the
|
||||
* require cache before rethrowing. This allows the application to
|
||||
* reattempt loading the module.
|
||||
*/
|
||||
|
||||
module_idx = duk_normalize_index(ctx, -1);
|
||||
|
||||
/* [ ... stash ... resolved_id module ] */
|
||||
|
||||
(void) duk_get_prop_string(ctx, stash_idx, "\xff" "modLoad");
|
||||
duk_dup(ctx, -3); /* resolved ID */
|
||||
(void) duk_get_prop_string(ctx, module_idx, "exports");
|
||||
duk_dup(ctx, module_idx);
|
||||
ret = duk_pcall(ctx, 3);
|
||||
if (ret != DUK_EXEC_SUCCESS) {
|
||||
duk__del_cached_module(ctx, id);
|
||||
(void) duk_throw(ctx); /* rethrow */
|
||||
}
|
||||
|
||||
if (duk_is_string(ctx, -1)) {
|
||||
duk_int_t ret;
|
||||
|
||||
/* [ ... module source ] */
|
||||
|
||||
#if DUK_VERSION >= 19999
|
||||
ret = duk_safe_call(ctx, duk__eval_module_source, NULL, 2, 1);
|
||||
#else
|
||||
ret = duk_safe_call(ctx, duk__eval_module_source, 2, 1);
|
||||
#endif
|
||||
if (ret != DUK_EXEC_SUCCESS) {
|
||||
duk__del_cached_module(ctx, id);
|
||||
(void) duk_throw(ctx); /* rethrow */
|
||||
}
|
||||
} else if (duk_is_undefined(ctx, -1)) {
|
||||
duk_pop(ctx);
|
||||
} else {
|
||||
duk__del_cached_module(ctx, id);
|
||||
(void) duk_type_error(ctx, "invalid module load callback return value");
|
||||
}
|
||||
|
||||
/* fall through */
|
||||
|
||||
have_module:
|
||||
/* [ ... module ] */
|
||||
|
||||
(void) duk_get_prop_string(ctx, -1, "exports");
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void duk__push_require_function(duk_context *ctx, const char *id) {
|
||||
duk_push_c_function(ctx, duk__handle_require, 1);
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, "require");
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE);
|
||||
duk_push_string(ctx, id);
|
||||
duk_put_prop_string(ctx, -2, "\xff" "moduleId");
|
||||
|
||||
/* require.cache */
|
||||
duk_push_global_stash(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "requireCache");
|
||||
duk_put_prop_string(ctx, -3, "cache");
|
||||
duk_pop(ctx);
|
||||
|
||||
/* require.main */
|
||||
duk_push_global_stash(ctx);
|
||||
(void) duk_get_prop_string(ctx, -1, "\xff" "mainModule");
|
||||
duk_put_prop_string(ctx, -3, "main");
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
static void duk__push_module_object(duk_context *ctx, const char *id, duk_bool_t main) {
|
||||
duk_push_object(ctx);
|
||||
|
||||
/* Set this as the main module, if requested */
|
||||
if (main) {
|
||||
duk_push_global_stash(ctx);
|
||||
duk_dup(ctx, -2);
|
||||
duk_put_prop_string(ctx, -2, "\xff" "mainModule");
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
/* Node.js uses the canonicalized filename of a module for both module.id
|
||||
* and module.filename. We have no concept of a file system here, so just
|
||||
* use the module ID for both values.
|
||||
*/
|
||||
duk_push_string(ctx, id);
|
||||
duk_dup(ctx, -1);
|
||||
duk_put_prop_string(ctx, -3, "filename");
|
||||
duk_put_prop_string(ctx, -2, "id");
|
||||
|
||||
/* module.exports = {} */
|
||||
duk_push_object(ctx);
|
||||
duk_put_prop_string(ctx, -2, "exports");
|
||||
|
||||
/* module.loaded = false */
|
||||
duk_push_false(ctx);
|
||||
duk_put_prop_string(ctx, -2, "loaded");
|
||||
|
||||
/* module.require */
|
||||
duk__push_require_function(ctx, id);
|
||||
duk_put_prop_string(ctx, -2, "require");
|
||||
}
|
||||
|
||||
#if DUK_VERSION >= 19999
|
||||
static duk_int_t duk__eval_module_source(duk_context *ctx, void *udata) {
|
||||
#else
|
||||
static duk_int_t duk__eval_module_source(duk_context *ctx) {
|
||||
#endif
|
||||
const char *src;
|
||||
|
||||
/*
|
||||
* Stack: [ ... module source ]
|
||||
*/
|
||||
|
||||
#if DUK_VERSION >= 19999
|
||||
(void) udata;
|
||||
#endif
|
||||
|
||||
/* Wrap the module code in a function expression. This is the simplest
|
||||
* way to implement CommonJS closure semantics and matches the behavior of
|
||||
* e.g. Node.js.
|
||||
*/
|
||||
duk_push_string(ctx, "(function(exports,require,module,__filename,__dirname){");
|
||||
src = duk_require_string(ctx, -2);
|
||||
duk_push_string(ctx, (src[0] == '#' && src[1] == '!') ? "//" : ""); /* Shebang support. */
|
||||
duk_dup(ctx, -3); /* source */
|
||||
duk_push_string(ctx, "\n})"); /* Newline allows module last line to contain a // comment. */
|
||||
duk_concat(ctx, 4);
|
||||
|
||||
/* [ ... module source func_src ] */
|
||||
|
||||
(void) duk_get_prop_string(ctx, -3, "filename");
|
||||
duk_compile(ctx, DUK_COMPILE_EVAL);
|
||||
duk_call(ctx, 0);
|
||||
|
||||
/* [ ... module source func ] */
|
||||
|
||||
/* Set name for the wrapper function. */
|
||||
duk_push_string(ctx, "name");
|
||||
duk_push_string(ctx, "main");
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_FORCE);
|
||||
|
||||
/* call the function wrapper */
|
||||
(void) duk_get_prop_string(ctx, -3, "exports"); /* exports */
|
||||
(void) duk_get_prop_string(ctx, -4, "require"); /* require */
|
||||
duk_dup(ctx, -5); /* module */
|
||||
(void) duk_get_prop_string(ctx, -6, "filename"); /* __filename */
|
||||
duk_push_undefined(ctx); /* __dirname */
|
||||
duk_call(ctx, 5);
|
||||
|
||||
/* [ ... module source result(ignore) ] */
|
||||
|
||||
/* module.loaded = true */
|
||||
duk_push_true(ctx);
|
||||
duk_put_prop_string(ctx, -4, "loaded");
|
||||
|
||||
/* [ ... module source retval ] */
|
||||
|
||||
duk_pop_2(ctx);
|
||||
|
||||
/* [ ... module ] */
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Load a module as the 'main' module. */
|
||||
duk_ret_t duk_module_node_peval_main(duk_context *ctx, const char *path) {
|
||||
/*
|
||||
* Stack: [ ... source ]
|
||||
*/
|
||||
|
||||
duk__push_module_object(ctx, path, 1 /*main*/);
|
||||
/* [ ... source module ] */
|
||||
|
||||
duk_dup(ctx, 0);
|
||||
/* [ ... source module source ] */
|
||||
|
||||
#if DUK_VERSION >= 19999
|
||||
return duk_safe_call(ctx, duk__eval_module_source, NULL, 2, 1);
|
||||
#else
|
||||
return duk_safe_call(ctx, duk__eval_module_source, 2, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
void duk_module_node_init(duk_context *ctx) {
|
||||
/*
|
||||
* Stack: [ ... options ] => [ ... ]
|
||||
*/
|
||||
|
||||
duk_idx_t options_idx;
|
||||
|
||||
duk_require_object_coercible(ctx, -1); /* error before setting up requireCache */
|
||||
options_idx = duk_require_normalize_index(ctx, -1);
|
||||
|
||||
/* Initialize the require cache to a fresh object. */
|
||||
duk_push_global_stash(ctx);
|
||||
#if DUK_VERSION >= 19999
|
||||
duk_push_bare_object(ctx);
|
||||
#else
|
||||
duk_push_object(ctx);
|
||||
duk_push_undefined(ctx);
|
||||
duk_set_prototype(ctx, -2);
|
||||
#endif
|
||||
duk_put_prop_string(ctx, -2, "\xff" "requireCache");
|
||||
duk_pop(ctx);
|
||||
|
||||
/* Stash callbacks for later use. User code can overwrite them later
|
||||
* on directly by accessing the global stash.
|
||||
*/
|
||||
duk_push_global_stash(ctx);
|
||||
duk_get_prop_string(ctx, options_idx, "resolve");
|
||||
duk_require_function(ctx, -1);
|
||||
duk_put_prop_string(ctx, -2, "\xff" "modResolve");
|
||||
duk_get_prop_string(ctx, options_idx, "load");
|
||||
duk_require_function(ctx, -1);
|
||||
duk_put_prop_string(ctx, -2, "\xff" "modLoad");
|
||||
duk_pop(ctx);
|
||||
|
||||
/* Stash main module. */
|
||||
duk_push_global_stash(ctx);
|
||||
duk_push_undefined(ctx);
|
||||
duk_put_prop_string(ctx, -2, "\xff" "mainModule");
|
||||
duk_pop(ctx);
|
||||
|
||||
/* register `require` as a global function. */
|
||||
duk_push_global_object(ctx);
|
||||
duk_push_string(ctx, "require");
|
||||
duk__push_require_function(ctx, "");
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE |
|
||||
DUK_DEFPROP_SET_WRITABLE |
|
||||
DUK_DEFPROP_SET_CONFIGURABLE);
|
||||
duk_pop(ctx);
|
||||
|
||||
duk_pop(ctx); /* pop argument */
|
||||
}
|
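A minimal sketch of wiring up the Node.js-style loader above: duk_module_node_init() expects an options object whose resolve and load functions are stashed as modResolve/modLoad and invoked by the require handler. The cb_resolve/cb_load callbacks and the hard-coded 'answer' module below are illustrative assumptions, not part of this diff.

#include <string.h>
#include "duktape.h"
#include "duk_module_node.h"

/* resolve(requested_id, parent_id) -> resolved_id */
static duk_ret_t cb_resolve(duk_context *ctx) {
	/* Identity resolution for the sketch; a real resolver canonicalizes
	 * relative IDs against the parent ID at index 1.
	 */
	duk_dup(ctx, 0);
	return 1;
}

/* load(resolved_id, exports, module) -> source string (or undefined) */
static duk_ret_t cb_load(duk_context *ctx) {
	const char *id = duk_require_string(ctx, 0);
	if (strcmp(id, "answer") == 0) {
		duk_push_string(ctx, "module.exports = 42;");
		return 1;
	}
	return duk_type_error(ctx, "cannot find module: %s", id);
}

static void register_node_modules(duk_context *ctx) {
	duk_push_object(ctx);                          /* options */
	duk_push_c_function(ctx, cb_resolve, 2 /*nargs*/);
	duk_put_prop_string(ctx, -2, "resolve");
	duk_push_c_function(ctx, cb_load, 3 /*nargs*/);
	duk_put_prop_string(ctx, -2, "load");
	duk_module_node_init(ctx);                     /* pops the options object */
}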
9 vendor/gopkg.in/olebedev/go-duktape.v3/duk_module_node.h (generated, vendored, executable file)
@ -0,0 +1,9 @@
#if !defined(DUK_MODULE_NODE_H_INCLUDED)
#define DUK_MODULE_NODE_H_INCLUDED

#include "duktape.h"

extern duk_ret_t duk_module_node_peval_main(duk_context *ctx, const char *path);
extern void duk_module_node_init(duk_context *ctx);

#endif  /* DUK_MODULE_NODE_H_INCLUDED */
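A companion sketch for duk_module_node_peval_main(): the main module's source is pushed onto the value stack (the function dups index 0, so it should sit at the stack bottom) and the call returns a protected-call style status. The embedded source and the 'main.js' path are illustrative, and it assumes resolve/load callbacks were already registered with duk_module_node_init() as in the previous sketch.

#include <stdio.h>
#include "duktape.h"
#include "duk_module_node.h"

static int run_main_module(duk_context *ctx) {
	/* Push the main module source onto an otherwise empty value stack. */
	duk_push_string(ctx, "var v = require('answer'); module.exports = v + 1;");
	if (duk_module_node_peval_main(ctx, "main.js") != 0) {
		fprintf(stderr, "main module failed: %s\n", duk_safe_to_string(ctx, -1));
		duk_pop(ctx);
		return -1;
	}
	duk_pop(ctx);  /* pop the evaluation result */
	return 0;
}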
127 vendor/gopkg.in/olebedev/go-duktape.v3/duk_print_alert.c (generated, vendored, executable file)
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Duktape 1.x compatible print() and alert() bindings.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "duktape.h"
|
||||
#include "duk_print_alert.h"
|
||||
|
||||
#define DUK_PRINT_ALERT_FLUSH /* Flush after stdout/stderr write (Duktape 1.x: yes) */
|
||||
#undef DUK_PRINT_ALERT_SMALL /* Prefer smaller footprint (but slower and more memory churn) */
|
||||
|
||||
#if defined(DUK_PRINT_ALERT_SMALL)
static duk_ret_t duk__print_alert_helper(duk_context *ctx, FILE *fh) {
	duk_idx_t nargs;
	const duk_uint8_t *buf;
	duk_size_t sz_buf;

	nargs = duk_get_top(ctx);

	/* If argument count is 1 and first argument is a buffer, write the buffer
	 * as raw data into the file without a newline; this allows exact control
	 * over stdout/stderr without an additional entrypoint (useful for now).
	 * Otherwise current print/alert semantics are to ToString() coerce
	 * arguments, join them with a single space, and append a newline.
	 */

	if (nargs == 1 && duk_is_buffer(ctx, 0)) {
		buf = (const duk_uint8_t *) duk_get_buffer(ctx, 0, &sz_buf);
		fwrite((const void *) buf, 1, (size_t) sz_buf, fh);
	} else {
		duk_push_string(ctx, " ");
		duk_insert(ctx, 0);
		duk_join(ctx, nargs);  /* join arguments with a single space */
		fprintf(fh, "%s\n", duk_require_string(ctx, -1));
	}

#if defined(DUK_PRINT_ALERT_FLUSH)
	fflush(fh);
#endif

	return 0;
}
|
||||
#else
|
||||
/* Faster, less churn, higher footprint option. */
|
||||
static duk_ret_t duk__print_alert_helper(duk_context *ctx, FILE *fh) {
|
||||
duk_idx_t nargs;
|
||||
const duk_uint8_t *buf;
|
||||
duk_size_t sz_buf;
|
||||
const char nl = (const char) '\n';
|
||||
duk_uint8_t buf_stack[256];
|
||||
|
||||
nargs = duk_get_top(ctx);
|
||||
|
||||
/* If argument count is 1 and first argument is a buffer, write the buffer
|
||||
* as raw data into the file without a newline; this allows exact control
|
||||
* over stdout/stderr without an additional entrypoint (useful for now).
|
||||
* Otherwise current print/alert semantics are to ToString() coerce
|
||||
* arguments, join them with a single space, and append a newline.
|
||||
*/
|
||||
|
||||
if (nargs == 1 && duk_is_buffer(ctx, 0)) {
|
||||
buf = (const duk_uint8_t *) duk_get_buffer(ctx, 0, &sz_buf);
|
||||
} else if (nargs > 0) {
|
||||
duk_idx_t i;
|
||||
duk_size_t sz_str;
|
||||
const duk_uint8_t *p_str;
|
||||
duk_uint8_t *p;
|
||||
|
||||
sz_buf = (duk_size_t) nargs; /* spaces (nargs - 1) + newline */
|
||||
for (i = 0; i < nargs; i++) {
|
||||
(void) duk_to_lstring(ctx, i, &sz_str);
|
||||
sz_buf += sz_str;
|
||||
}
|
||||
|
||||
if (sz_buf <= sizeof(buf_stack)) {
|
||||
p = (duk_uint8_t *) buf_stack;
|
||||
} else {
|
||||
p = (duk_uint8_t *) duk_push_fixed_buffer(ctx, sz_buf);
|
||||
}
|
||||
|
||||
buf = (const duk_uint8_t *) p;
|
||||
for (i = 0; i < nargs; i++) {
|
||||
p_str = (const duk_uint8_t *) duk_get_lstring(ctx, i, &sz_str);
|
||||
memcpy((void *) p, (const void *) p_str, sz_str);
|
||||
p += sz_str;
|
||||
*p++ = (duk_uint8_t) (i == nargs - 1 ? '\n' : ' ');
|
||||
}
|
||||
} else {
|
||||
buf = (const duk_uint8_t *) &nl;
|
||||
sz_buf = 1;
|
||||
}
|
||||
|
||||
/* 'buf' contains the string to write, 'sz_buf' contains the length
|
||||
* (which may be zero).
|
||||
*/
|
||||
|
||||
if (sz_buf > 0) {
|
||||
fwrite((const void *) buf, 1, (size_t) sz_buf, fh);
|
||||
#if defined(DUK_PRINT_ALERT_FLUSH)
|
||||
fflush(fh);
|
||||
#endif
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static duk_ret_t duk__print(duk_context *ctx) {
|
||||
return duk__print_alert_helper(ctx, stdout);
|
||||
}
|
||||
|
||||
static duk_ret_t duk__alert(duk_context *ctx) {
|
||||
return duk__print_alert_helper(ctx, stderr);
|
||||
}
|
||||
|
||||
void duk_print_alert_init(duk_context *ctx, duk_uint_t flags) {
|
||||
(void) flags; /* unused at the moment */
|
||||
|
||||
/* XXX: use duk_def_prop_list(). */
|
||||
duk_push_global_object(ctx);
|
||||
duk_push_string(ctx, "print");
|
||||
duk_push_c_function(ctx, duk__print, DUK_VARARGS);
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_SET_WRITABLE | DUK_DEFPROP_SET_CONFIGURABLE);
|
||||
duk_push_string(ctx, "alert");
|
||||
duk_push_c_function(ctx, duk__alert, DUK_VARARGS);
|
||||
duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_SET_WRITABLE | DUK_DEFPROP_SET_CONFIGURABLE);
|
||||
duk_pop(ctx);
|
||||
}
|
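A quick usage sketch: after duk_print_alert_init(), print() writes to stdout and alert() to stderr, ToString-coercing the arguments, joining them with a single space and appending a newline (a single buffer argument is written raw instead). The evaluated snippets are illustrative.

#include "duktape.h"
#include "duk_print_alert.h"

static void demo_print_alert(duk_context *ctx) {
	duk_print_alert_init(ctx, 0 /*flags*/);

	/* -> "hello 123 true\n" on stdout */
	duk_eval_string_noresult(ctx, "print('hello', 123, true)");

	/* -> "something went wrong\n" on stderr */
	duk_eval_string_noresult(ctx, "alert('something went wrong')");
}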
10 vendor/gopkg.in/olebedev/go-duktape.v3/duk_print_alert.h (generated, vendored, executable file)
@ -0,0 +1,10 @@
#if !defined(DUK_PRINT_ALERT_H_INCLUDED)
#define DUK_PRINT_ALERT_H_INCLUDED

#include "duktape.h"

/* No flags at the moment. */

extern void duk_print_alert_init(duk_context *ctx, duk_uint_t flags);

#endif  /* DUK_PRINT_ALERT_H_INCLUDED */
131 vendor/gopkg.in/olebedev/go-duktape.v3/duk_v1_compat.c (generated, vendored, executable file)
@ -0,0 +1,131 @@
|
||||
#include <stdio.h>
|
||||
#include "duktape.h"
|
||||
#include "duk_v1_compat.h"
|
||||
|
||||
/*
|
||||
* duk_dump_context_{stdout,stderr}()
|
||||
*/
|
||||
|
||||
void duk_dump_context_stdout(duk_context *ctx) {
|
||||
duk_push_context_dump(ctx);
|
||||
fprintf(stdout, "%s\n", duk_safe_to_string(ctx, -1));
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
void duk_dump_context_stderr(duk_context *ctx) {
|
||||
duk_push_context_dump(ctx);
|
||||
fprintf(stderr, "%s\n", duk_safe_to_string(ctx, -1));
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
/*
|
||||
* duk_push_string_file() and duk_push_string_file_raw()
|
||||
*/
|
||||
|
||||
const char *duk_push_string_file_raw(duk_context *ctx, const char *path, duk_uint_t flags) {
|
||||
FILE *f = NULL;
|
||||
char *buf;
|
||||
long sz; /* ANSI C typing */
|
||||
|
||||
if (!path) {
|
||||
goto fail;
|
||||
}
|
||||
f = fopen(path, "rb");
|
||||
if (!f) {
|
||||
goto fail;
|
||||
}
|
||||
if (fseek(f, 0, SEEK_END) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
sz = ftell(f);
|
||||
if (sz < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (fseek(f, 0, SEEK_SET) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
buf = (char *) duk_push_fixed_buffer(ctx, (duk_size_t) sz);
|
||||
if ((size_t) fread(buf, 1, (size_t) sz, f) != (size_t) sz) {
|
||||
duk_pop(ctx);
|
||||
goto fail;
|
||||
}
|
||||
(void) fclose(f); /* ignore fclose() error */
|
||||
return duk_buffer_to_string(ctx, -1);
|
||||
|
||||
fail:
|
||||
if (f) {
|
||||
(void) fclose(f); /* ignore fclose() error */
|
||||
}
|
||||
|
||||
if (flags & DUK_STRING_PUSH_SAFE) {
|
||||
duk_push_undefined(ctx);
|
||||
} else {
|
||||
(void) duk_type_error(ctx, "read file error");
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* duk_eval_file(), duk_compile_file(), and their variants
|
||||
*/
|
||||
|
||||
void duk_eval_file(duk_context *ctx, const char *path) {
|
||||
duk_push_string_file_raw(ctx, path, 0);
|
||||
duk_push_string(ctx, path);
|
||||
duk_compile(ctx, DUK_COMPILE_EVAL);
|
||||
duk_push_global_object(ctx); /* 'this' binding */
|
||||
duk_call_method(ctx, 0);
|
||||
}
|
||||
|
||||
void duk_eval_file_noresult(duk_context *ctx, const char *path) {
|
||||
duk_eval_file(ctx, path);
|
||||
duk_pop(ctx);
|
||||
}
|
||||
|
||||
duk_int_t duk_peval_file(duk_context *ctx, const char *path) {
|
||||
duk_int_t rc;
|
||||
|
||||
duk_push_string_file_raw(ctx, path, DUK_STRING_PUSH_SAFE);
|
||||
duk_push_string(ctx, path);
|
||||
rc = duk_pcompile(ctx, DUK_COMPILE_EVAL);
|
||||
if (rc != 0) {
|
||||
return rc;
|
||||
}
|
||||
duk_push_global_object(ctx); /* 'this' binding */
|
||||
rc = duk_pcall_method(ctx, 0);
|
||||
return rc;
|
||||
}
|
||||
|
||||
duk_int_t duk_peval_file_noresult(duk_context *ctx, const char *path) {
|
||||
duk_int_t rc;
|
||||
|
||||
rc = duk_peval_file(ctx, path);
|
||||
duk_pop(ctx);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void duk_compile_file(duk_context *ctx, duk_uint_t flags, const char *path) {
|
||||
duk_push_string_file_raw(ctx, path, 0);
|
||||
duk_push_string(ctx, path);
|
||||
duk_compile(ctx, flags);
|
||||
}
|
||||
|
||||
duk_int_t duk_pcompile_file(duk_context *ctx, duk_uint_t flags, const char *path) {
|
||||
duk_int_t rc;
|
||||
|
||||
duk_push_string_file_raw(ctx, path, DUK_STRING_PUSH_SAFE);
|
||||
duk_push_string(ctx, path);
|
||||
rc = duk_pcompile(ctx, flags);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* duk_to_defaultvalue()
|
||||
*/
|
||||
|
||||
void duk_to_defaultvalue(duk_context *ctx, duk_idx_t idx, duk_int_t hint) {
|
||||
duk_require_type_mask(ctx, idx, DUK_TYPE_MASK_OBJECT |
|
||||
DUK_TYPE_MASK_BUFFER |
|
||||
DUK_TYPE_MASK_LIGHTFUNC);
|
||||
duk_to_primitive(ctx, idx, hint);
|
||||
}
|
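A usage sketch for the file helpers above (script.js is an illustrative path): duk_peval_file() loads the file with DUK_STRING_PUSH_SAFE, so a missing file becomes undefined rather than an immediate throw, and compile or runtime errors surface as a nonzero protected-call return code with the error left on the stack.

#include <stdio.h>
#include "duktape.h"
#include "duk_v1_compat.h"

static void run_script(duk_context *ctx) {
	if (duk_peval_file(ctx, "script.js") != 0) {
		/* compile or runtime error value left on the stack */
		fprintf(stderr, "script.js failed: %s\n", duk_safe_to_string(ctx, -1));
	} else {
		printf("script.js result: %s\n", duk_safe_to_string(ctx, -1));
	}
	duk_pop(ctx);
}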
28 vendor/gopkg.in/olebedev/go-duktape.v3/duk_v1_compat.h (generated, vendored, executable file)
@ -0,0 +1,28 @@
#if !defined(DUK_V1_COMPAT_INCLUDED)
#define DUK_V1_COMPAT_INCLUDED

#include "duktape.h"

/* Straight flag rename */
#if !defined(DUK_ENUM_INCLUDE_INTERNAL)
#define DUK_ENUM_INCLUDE_INTERNAL DUK_ENUM_INCLUDE_HIDDEN
#endif

/* Flags for duk_push_string_file_raw() */
#define DUK_STRING_PUSH_SAFE (1 << 0)  /* no error if file does not exist */

extern void duk_dump_context_stdout(duk_context *ctx);
extern void duk_dump_context_stderr(duk_context *ctx);
extern const char *duk_push_string_file_raw(duk_context *ctx, const char *path, duk_uint_t flags);
extern void duk_eval_file(duk_context *ctx, const char *path);
extern void duk_eval_file_noresult(duk_context *ctx, const char *path);
extern duk_int_t duk_peval_file(duk_context *ctx, const char *path);
extern duk_int_t duk_peval_file_noresult(duk_context *ctx, const char *path);
extern void duk_compile_file(duk_context *ctx, duk_uint_t flags, const char *path);
extern duk_int_t duk_pcompile_file(duk_context *ctx, duk_uint_t flags, const char *path);
extern void duk_to_defaultvalue(duk_context *ctx, duk_idx_t idx, duk_int_t hint);

#define duk_push_string_file(ctx,path) \
	duk_push_string_file_raw((ctx), (path), 0)

#endif  /* DUK_V1_COMPAT_INCLUDED */
95118 vendor/gopkg.in/olebedev/go-duktape.v3/duktape.c (generated, vendored, executable file; diff suppressed because it is too large)
356 vendor/gopkg.in/olebedev/go-duktape.v3/duktape.go (generated, vendored, normal file)
@ -0,0 +1,356 @@
|
||||
package duktape
|
||||
|
||||
/*
|
||||
#cgo !windows CFLAGS: -std=c99 -O3 -Wall -fomit-frame-pointer -fstrict-aliasing
|
||||
#cgo windows CFLAGS: -O3 -Wall -fomit-frame-pointer -fstrict-aliasing
|
||||
#cgo linux LDFLAGS: -lm
|
||||
#cgo freebsd LDFLAGS: -lm
|
||||
|
||||
#include "duktape.h"
|
||||
#include "duk_logging.h"
|
||||
#include "duk_print_alert.h"
|
||||
#include "duk_module_duktape.h"
|
||||
#include "duk_console.h"
|
||||
extern duk_ret_t goFunctionCall(duk_context *ctx);
|
||||
extern void goFinalizeCall(duk_context *ctx);
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var reFuncName = regexp.MustCompile("^[a-z_][a-z0-9_]*([A-Z_][a-z0-9_]*)*$")
|
||||
|
||||
const (
|
||||
goFunctionPtrProp = "\xff" + "goFunctionPtrProp"
|
||||
goContextPtrProp = "\xff" + "goContextPtrProp"
|
||||
)
|
||||
|
||||
type Context struct {
|
||||
*context
|
||||
}
|
||||
|
||||
// transmute replaces the value from Context with the value of pointer
|
||||
func (c *Context) transmute(p unsafe.Pointer) {
|
||||
*c = *(*Context)(p)
|
||||
}
|
||||
|
||||
// context is a plain struct carrying only the values of the Context
type context struct {
|
||||
sync.Mutex
|
||||
duk_context *C.duk_context
|
||||
fnIndex *functionIndex
|
||||
timerIndex *timerIndex
|
||||
}
|
||||
|
||||
// New returns plain initialized duktape context object
|
||||
// See: http://duktape.org/api.html#duk_create_heap_default
|
||||
func New() *Context {
|
||||
d := &Context{
|
||||
&context{
|
||||
duk_context: C.duk_create_heap(nil, nil, nil, nil, nil),
|
||||
fnIndex: newFunctionIndex(),
|
||||
timerIndex: &timerIndex{},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := d.duk_context
|
||||
C.duk_logging_init(ctx, 0)
|
||||
C.duk_print_alert_init(ctx, 0)
|
||||
C.duk_module_duktape_init(ctx)
|
||||
C.duk_console_init(ctx, 0)
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// Flags is a set of flags for controlling the behaviour of duktape.
|
||||
type Flags struct {
|
||||
Logging uint
|
||||
PrintAlert uint
|
||||
Console uint
|
||||
}
|
||||
|
||||
// FlagConsoleProxyWrapper is a Console flag.
|
||||
// Use a proxy wrapper to make undefined methods (console.foo()) no-ops.
|
||||
const FlagConsoleProxyWrapper = 1 << 0
|
||||
|
||||
// FlagConsoleFlush is a Console flag.
|
||||
// Flush output after every call.
|
||||
const FlagConsoleFlush = 1 << 1
|
||||
|
||||
// NewWithFlags returns plain initialized duktape context object
|
||||
// You can control the behaviour of duktape by setting flags.
|
||||
// See: http://duktape.org/api.html#duk_create_heap_default
|
||||
func NewWithFlags(flags *Flags) *Context {
|
||||
d := &Context{
|
||||
&context{
|
||||
duk_context: C.duk_create_heap(nil, nil, nil, nil, nil),
|
||||
fnIndex: newFunctionIndex(),
|
||||
timerIndex: &timerIndex{},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := d.duk_context
|
||||
C.duk_logging_init(ctx, C.duk_uint_t(flags.Logging))
|
||||
C.duk_print_alert_init(ctx, C.duk_uint_t(flags.PrintAlert))
|
||||
C.duk_module_duktape_init(ctx)
|
||||
C.duk_console_init(ctx, C.duk_uint_t(flags.Console))
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func contextFromPointer(ctx *C.duk_context) *Context {
|
||||
return &Context{&context{duk_context: ctx}}
|
||||
}
|
||||
|
||||
// PushGlobalGoFunction pushes the given function into the duktape global object.
// It returns the non-negative index (relative to stack bottom) of the pushed
// function, or an error if the function name is invalid.
func (d *Context) PushGlobalGoFunction(name string, fn func(*Context) int) (int, error) {
|
||||
if !reFuncName.MatchString(name) {
|
||||
return -1, errors.New("Malformed function name '" + name + "'")
|
||||
}
|
||||
|
||||
d.PushGlobalObject()
|
||||
idx := d.PushGoFunction(fn)
|
||||
d.PutPropString(-2, name)
|
||||
d.Pop()
|
||||
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
// PushGoFunction push the given function into duktape stack, returns non-negative
|
||||
// index (relative to stack bottom) of the pushed function
|
||||
func (d *Context) PushGoFunction(fn func(*Context) int) int {
|
||||
funPtr := d.fnIndex.add(fn)
|
||||
ctxPtr := contexts.add(d)
|
||||
|
||||
idx := d.PushCFunction((*[0]byte)(C.goFunctionCall), C.DUK_VARARGS)
|
||||
d.PushCFunction((*[0]byte)(C.goFinalizeCall), 1)
|
||||
d.PushPointer(funPtr)
|
||||
d.PutPropString(-2, goFunctionPtrProp)
|
||||
d.PushPointer(ctxPtr)
|
||||
d.PutPropString(-2, goContextPtrProp)
|
||||
d.SetFinalizer(-2)
|
||||
|
||||
d.PushPointer(funPtr)
|
||||
d.PutPropString(-2, goFunctionPtrProp)
|
||||
d.PushPointer(ctxPtr)
|
||||
d.PutPropString(-2, goContextPtrProp)
|
||||
|
||||
return idx
|
||||
}
|
||||
|
||||
//export goFunctionCall
|
||||
func goFunctionCall(cCtx *C.duk_context) C.duk_ret_t {
|
||||
d := contextFromPointer(cCtx)
|
||||
|
||||
funPtr, ctx := d.getFunctionPtrs()
|
||||
d.transmute(unsafe.Pointer(ctx))
|
||||
|
||||
result := d.fnIndex.get(funPtr)(d)
|
||||
|
||||
return C.duk_ret_t(result)
|
||||
}
|
||||
|
||||
//export goFinalizeCall
|
||||
func goFinalizeCall(cCtx *C.duk_context) {
|
||||
d := contextFromPointer(cCtx)
|
||||
|
||||
funPtr, ctx := d.getFunctionPtrs()
|
||||
d.transmute(unsafe.Pointer(ctx))
|
||||
|
||||
d.fnIndex.delete(funPtr)
|
||||
}
|
||||
|
||||
func (d *Context) getFunctionPtrs() (unsafe.Pointer, *Context) {
|
||||
d.PushCurrentFunction()
|
||||
d.GetPropString(-1, goFunctionPtrProp)
|
||||
funPtr := d.GetPointer(-1)
|
||||
|
||||
d.Pop()
|
||||
|
||||
d.GetPropString(-1, goContextPtrProp)
|
||||
ctx := contexts.get(d.GetPointer(-1))
|
||||
d.Pop2()
|
||||
return funPtr, ctx
|
||||
}
|
||||
|
||||
// Destroy destroys all the references to the functions and frees the pointers
|
||||
func (d *Context) Destroy() {
|
||||
d.fnIndex.destroy()
|
||||
contexts.delete(d)
|
||||
}
|
||||
|
||||
type Error struct {
|
||||
Type string
|
||||
Message string
|
||||
FileName string
|
||||
LineNumber int
|
||||
Stack string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Type, e.Message)
|
||||
}
|
||||
|
||||
type Type int
|
||||
|
||||
func (t Type) IsNone() bool { return t == TypeNone }
|
||||
func (t Type) IsUndefined() bool { return t == TypeUndefined }
|
||||
func (t Type) IsNull() bool { return t == TypeNull }
|
||||
func (t Type) IsBool() bool { return t == TypeBoolean }
|
||||
func (t Type) IsNumber() bool { return t == TypeNumber }
|
||||
func (t Type) IsString() bool { return t == TypeString }
|
||||
func (t Type) IsObject() bool { return t == TypeObject }
|
||||
func (t Type) IsBuffer() bool { return t == TypeBuffer }
|
||||
func (t Type) IsPointer() bool { return t == TypePointer }
|
||||
func (t Type) IsLightFunc() bool { return t == TypeLightFunc }
|
||||
|
||||
func (t Type) String() string {
|
||||
switch t {
|
||||
case TypeNone:
|
||||
return "None"
|
||||
case TypeUndefined:
|
||||
return "Undefined"
|
||||
case TypeNull:
|
||||
return "Null"
|
||||
case TypeBoolean:
|
||||
return "Boolean"
|
||||
case TypeNumber:
|
||||
return "Number"
|
||||
case TypeString:
|
||||
return "String"
|
||||
case TypeObject:
|
||||
return "Object"
|
||||
case TypeBuffer:
|
||||
return "Buffer"
|
||||
case TypePointer:
|
||||
return "Pointer"
|
||||
case TypeLightFunc:
|
||||
return "LightFunc"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type functionIndex struct {
|
||||
functions map[unsafe.Pointer]func(*Context) int
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type timerIndex struct {
|
||||
c float64
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (t *timerIndex) get() float64 {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.c++
|
||||
return t.c
|
||||
}
|
||||
|
||||
func newFunctionIndex() *functionIndex {
|
||||
return &functionIndex{
|
||||
functions: make(map[unsafe.Pointer]func(*Context) int, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (i *functionIndex) add(fn func(*Context) int) unsafe.Pointer {
|
||||
ptr := C.malloc(1)
|
||||
|
||||
i.Lock()
|
||||
i.functions[ptr] = fn
|
||||
i.Unlock()
|
||||
|
||||
return ptr
|
||||
}
|
||||
|
||||
func (i *functionIndex) get(ptr unsafe.Pointer) func(*Context) int {
|
||||
i.RLock()
|
||||
fn := i.functions[ptr]
|
||||
i.RUnlock()
|
||||
|
||||
return fn
|
||||
}
|
||||
|
||||
func (i *functionIndex) delete(ptr unsafe.Pointer) {
|
||||
i.Lock()
|
||||
delete(i.functions, ptr)
|
||||
i.Unlock()
|
||||
|
||||
C.free(ptr)
|
||||
}
|
||||
|
||||
func (i *functionIndex) destroy() {
|
||||
i.Lock()
|
||||
|
||||
for ptr, _ := range i.functions {
|
||||
delete(i.functions, ptr)
|
||||
C.free(ptr)
|
||||
}
|
||||
i.Unlock()
|
||||
}
|
||||
|
||||
type ctxIndex struct {
|
||||
sync.RWMutex
|
||||
ctxs map[unsafe.Pointer]*Context
|
||||
}
|
||||
|
||||
func (ci *ctxIndex) add(ctx *Context) unsafe.Pointer {
|
||||
|
||||
ci.RLock()
|
||||
for ptr, ctxPtr := range ci.ctxs {
|
||||
if ctxPtr == ctx {
|
||||
ci.RUnlock()
|
||||
return ptr
|
||||
}
|
||||
}
|
||||
ci.RUnlock()
|
||||
|
||||
ci.Lock()
|
||||
for ptr, ctxPtr := range ci.ctxs {
|
||||
if ctxPtr == ctx {
|
||||
ci.Unlock()
|
||||
return ptr
|
||||
}
|
||||
}
|
||||
ptr := C.malloc(1)
|
||||
ci.ctxs[ptr] = ctx
|
||||
ci.Unlock()
|
||||
|
||||
return ptr
|
||||
}
|
||||
|
||||
func (ci *ctxIndex) get(ptr unsafe.Pointer) *Context {
|
||||
ci.RLock()
|
||||
ctx := ci.ctxs[ptr]
|
||||
ci.RUnlock()
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (ci *ctxIndex) delete(ctx *Context) {
|
||||
ci.Lock()
|
||||
for ptr, ctxPtr := range ci.ctxs {
|
||||
if ctxPtr == ctx {
|
||||
delete(ci.ctxs, ptr)
|
||||
C.free(ptr)
|
||||
ci.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("context (%p) doesn't exist", ctx))
|
||||
}
|
||||
|
||||
var contexts *ctxIndex
|
||||
|
||||
func init() {
|
||||
contexts = &ctxIndex{
|
||||
ctxs: make(map[unsafe.Pointer]*Context),
|
||||
}
|
||||
}
|
1349 vendor/gopkg.in/olebedev/go-duktape.v3/duktape.h (generated, vendored, executable file; diff suppressed because it is too large)
136 vendor/gopkg.in/olebedev/go-duktape.v3/timers.go (generated, vendored, normal file)
@ -0,0 +1,136 @@
|
||||
package duktape
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PushTimers defines `setTimeout`, `clearTimeout`, `setInterval`,
// `clearInterval` into global context.
func (d *Context) PushTimers() error {
|
||||
d.PushGlobalStash()
|
||||
// check if timers already exists
|
||||
if !d.HasPropString(-1, "timers") {
|
||||
d.PushObject()
|
||||
d.PutPropString(-2, "timers") // stash -> [ timers:{} ]
|
||||
d.Pop()
|
||||
|
||||
d.PushGlobalGoFunction("setTimeout", setTimeout)
|
||||
d.PushGlobalGoFunction("setInterval", setInterval)
|
||||
d.PushGlobalGoFunction("clearTimeout", clearTimeout)
|
||||
d.PushGlobalGoFunction("clearInterval", clearTimeout)
|
||||
return nil
|
||||
} else {
|
||||
d.Pop()
|
||||
return errors.New("Timers are already defined")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Context) FlushTimers() {
|
||||
d.PushGlobalStash()
|
||||
d.PushObject()
|
||||
d.PutPropString(-2, "timers") // stash -> [ timers:{} ]
|
||||
d.Pop()
|
||||
}
|
||||
|
||||
func setTimeout(c *Context) int {
|
||||
id := c.pushTimer(0)
|
||||
timeout := c.ToNumber(1)
|
||||
if timeout < 1 {
|
||||
timeout = 1
|
||||
}
|
||||
go func(id float64) {
|
||||
<-time.After(time.Duration(timeout) * time.Millisecond)
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if c.duk_context == nil {
|
||||
fmt.Println("[duktape] Warning!\nsetTimeout invokes callback after the context was destroyed.")
|
||||
return
|
||||
}
|
||||
|
||||
// check if timer still exists
|
||||
c.putTimer(id)
|
||||
if c.GetType(-1).IsObject() {
|
||||
c.Pcall(0 /* nargs */)
|
||||
}
|
||||
c.dropTimer(id)
|
||||
}(id)
|
||||
c.PushNumber(id)
|
||||
return 1
|
||||
}
|
||||
|
||||
func clearTimeout(c *Context) int {
|
||||
if c.GetType(0).IsNumber() {
|
||||
c.dropTimer(c.GetNumber(0))
|
||||
c.Pop()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func setInterval(c *Context) int {
|
||||
id := c.pushTimer(0)
|
||||
timeout := c.ToNumber(1)
|
||||
if timeout < 1 {
|
||||
timeout = 1
|
||||
}
|
||||
go func(id float64) {
|
||||
ticker := time.NewTicker(time.Duration(timeout) * time.Millisecond)
|
||||
for _ = range ticker.C {
|
||||
c.Lock()
|
||||
// check if duktape context exists
|
||||
if c.duk_context == nil {
|
||||
c.dropTimer(id)
|
||||
c.Pop()
|
||||
ticker.Stop()
|
||||
fmt.Println("[duktape] Warning!\nsetInterval invokes callback after the context was destroyed.")
|
||||
c.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
// check if timer still exists
|
||||
c.putTimer(id)
|
||||
if c.GetType(-1).IsObject() {
|
||||
c.Pcall(0 /* nargs */)
|
||||
c.Pop()
|
||||
} else {
|
||||
c.dropTimer(id)
|
||||
c.Pop()
|
||||
ticker.Stop()
|
||||
}
|
||||
c.Unlock()
|
||||
}
|
||||
}(id)
|
||||
c.PushNumber(id)
|
||||
return 1
|
||||
}
|
||||
|
||||
func (d *Context) pushTimer(index int) float64 {
|
||||
id := d.timerIndex.get()
|
||||
|
||||
d.PushGlobalStash()
|
||||
d.GetPropString(-1, "timers")
|
||||
d.PushNumber(id)
|
||||
d.Dup(index)
|
||||
d.PutProp(-3)
|
||||
d.Pop2()
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
func (d *Context) dropTimer(id float64) {
|
||||
d.PushGlobalStash()
|
||||
d.GetPropString(-1, "timers")
|
||||
d.PushNumber(id)
|
||||
d.DelProp(-2)
|
||||
d.Pop2()
|
||||
}
|
||||
|
||||
func (d *Context) putTimer(id float64) {
|
||||
d.PushGlobalStash() // stash -> [ ..., timers: { <id>: { func: true } } ]
|
||||
d.GetPropString(-1, "timers") // stash -> [ ..., timers: { <id>: { func: true } } }, { <id>: { func: true } ]
|
||||
d.PushNumber(id)
|
||||
d.GetProp(-2) // stash -> [ ..., timers: { <id>: { func: true } } }, { <id>: { func: true }, { func: true } ]
|
||||
d.Replace(-3)
|
||||
d.Pop()
|
||||
}
|
10 vendor/gopkg.in/olebedev/go-duktape.v3/utils.go (generated, vendored, normal file)
@ -0,0 +1,10 @@
package duktape

// Must returns the existing *Context or panics.
// It is highly recommended to use Must all the time.
func (d *Context) Must() *Context {
	if d.duk_context == nil {
		panic("[duktape] Context does not exists!\nYou cannot call any contexts methods after `DestroyHeap()` was called.")
	}
	return d
}
14 vendor/gopkg.in/olebedev/go-duktape.v3/wercker.yml (generated, vendored, normal file)
@ -0,0 +1,14 @@
box: golang
build:
  steps:
    - setup-go-workspace
    - script:
        name: go get
        code: |
          cd $WERCKER_SOURCE_DIR
          go version
          go get gopkg.in/check.v1
    - script:
        name: go test
        code: |
          go test . -v
6 vendor/vendor.json (vendored)
@ -693,6 +693,12 @@
			"revision": "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6",
			"revisionTime": "2016-06-21T03:49:01Z"
		},
		{
			"checksumSHA1": "YvuEbc0a032zr+BTp/YbBQojiuY=",
			"path": "gopkg.in/olebedev/go-duktape.v3",
			"revision": "9af39127cb024b355a6a0221769f6ddfe3f542e7",
			"revisionTime": "2017-12-20T12:19:14Z"
		},
		{
			"checksumSHA1": "4BwmmgQUhWtizsR2soXND0nqZ1I=",
			"path": "gopkg.in/sourcemap.v1",