forked from cerc-io/plugeth
Merge pull request #21 from openrelayxyz/feature/blockTracer
Feature/block tracer
commit 6030087970
@@ -85,3 +85,6 @@ workflows:
       - build_geth_push:
           requires:
             - test
+          filters:
+            tags:
+              only: /^v.*/
@ -41,7 +41,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/plugins"
|
"github.com/ethereum/go-ethereum/plugins"
|
||||||
"github.com/ethereum/go-ethereum/plugins/wrappers"
|
"github.com/ethereum/go-ethereum/plugins/wrappers/backendwrapper"
|
||||||
|
|
||||||
// Force-load the tracer engines to trigger registration
|
// Force-load the tracer engines to trigger registration
|
||||||
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
|
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
|
||||||
@@ -314,7 +314,9 @@ func prepare(ctx *cli.Context) {
 // It creates a default node based on the command line arguments and runs it in
 // blocking mode, waiting for it to be shut down.
 func geth(ctx *cli.Context) error {
-    if err := plugins.Initialize(path.Join(ctx.GlobalString(utils.DataDirFlag.Name), "plugins"), ctx); err != nil { return err }
+    if err := plugins.Initialize(path.Join(ctx.GlobalString(utils.DataDirFlag.Name), "plugins"), ctx); err != nil {
+        return err
+    }
     prepare(ctx)
     if !plugins.ParseFlags(ctx.Args()) {
         if args := ctx.Args(); len(args) > 0 {
@@ -322,7 +324,7 @@ func geth(ctx *cli.Context) error {
         }
     }
     stack, backend := makeFullNode(ctx)
-    wrapperBackend := wrappers.NewBackend(backend)
+    wrapperBackend := backendwrapper.NewBackend(backend)
     pluginsInitializeNode(stack, wrapperBackend)
     if ok, err := plugins.RunSubcommand(ctx); ok {
         stack.Close()
@@ -1,182 +1,285 @@
 package core

 import (
     "encoding/json"
     "math/big"
-    "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/plugins"
-    "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/rlp"
-    "github.com/openrelayxyz/plugeth-utils/core"
+    "reflect"
+    "time"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/state"
+    "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/core/vm"
+    "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/plugins"
+    "github.com/ethereum/go-ethereum/plugins/wrappers"
+    "github.com/ethereum/go-ethereum/rlp"
+    "github.com/openrelayxyz/plugeth-utils/core"
 )

 func PluginPreProcessBlock(pl *plugins.PluginLoader, block *types.Block) {
     fnList := pl.Lookup("PreProcessBlock", func(item interface{}) bool {
         _, ok := item.(func(core.Hash, uint64, []byte))
         return ok
     })
     encoded, _ := rlp.EncodeToBytes(block)
     for _, fni := range fnList {
         if fn, ok := fni.(func(core.Hash, uint64, []byte)); ok {
             fn(core.Hash(block.Hash()), block.NumberU64(), encoded)
         }
     }
 }
 func pluginPreProcessBlock(block *types.Block) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting PreProcessBlock, but default PluginLoader has not been initialized")
         return
     }
     PluginPreProcessBlock(plugins.DefaultPluginLoader, block) // TODO
 }
 func PluginPreProcessTransaction(pl *plugins.PluginLoader, tx *types.Transaction, block *types.Block, i int) {
     fnList := pl.Lookup("PreProcessTransaction", func(item interface{}) bool {
         _, ok := item.(func([]byte, core.Hash, core.Hash, int))
         return ok
     })
     txBytes, _ := tx.MarshalBinary()
     for _, fni := range fnList {
         if fn, ok := fni.(func([]byte, core.Hash, core.Hash, int)); ok {
             fn(txBytes, core.Hash(tx.Hash()), core.Hash(block.Hash()), i)
         }
     }
 }
 func pluginPreProcessTransaction(tx *types.Transaction, block *types.Block, i int) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting PreProcessTransaction, but default PluginLoader has not been initialized")
         return
     }
     PluginPreProcessTransaction(plugins.DefaultPluginLoader, tx, block, i)
 }
 func PluginBlockProcessingError(pl *plugins.PluginLoader, tx *types.Transaction, block *types.Block, err error) {
     fnList := pl.Lookup("BlockProcessingError", func(item interface{}) bool {
         _, ok := item.(func(core.Hash, core.Hash, error))
         return ok
     })
     for _, fni := range fnList {
         if fn, ok := fni.(func(core.Hash, core.Hash, error)); ok {
             fn(core.Hash(tx.Hash()), core.Hash(block.Hash()), err)
         }
     }
 }
 func pluginBlockProcessingError(tx *types.Transaction, block *types.Block, err error) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting BlockProcessingError, but default PluginLoader has not been initialized")
         return
     }
     PluginBlockProcessingError(plugins.DefaultPluginLoader, tx, block, err)
 }
 func PluginPostProcessTransaction(pl *plugins.PluginLoader, tx *types.Transaction, block *types.Block, i int, receipt *types.Receipt) {
     fnList := pl.Lookup("PostProcessTransaction", func(item interface{}) bool {
         _, ok := item.(func(core.Hash, core.Hash, int, []byte))
         return ok
     })
     receiptBytes, _ := json.Marshal(receipt)
     for _, fni := range fnList {
         if fn, ok := fni.(func(core.Hash, core.Hash, int, []byte)); ok {
             fn(core.Hash(tx.Hash()), core.Hash(block.Hash()), i, receiptBytes)
         }
     }
 }
 func pluginPostProcessTransaction(tx *types.Transaction, block *types.Block, i int, receipt *types.Receipt) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting PostProcessTransaction, but default PluginLoader has not been initialized")
         return
     }
     PluginPostProcessTransaction(plugins.DefaultPluginLoader, tx, block, i, receipt)
 }
 func PluginPostProcessBlock(pl *plugins.PluginLoader, block *types.Block) {
     fnList := pl.Lookup("PostProcessBlock", func(item interface{}) bool {
         _, ok := item.(func(core.Hash))
         return ok
     })
     for _, fni := range fnList {
         if fn, ok := fni.(func(core.Hash)); ok {
             fn(core.Hash(block.Hash()))
         }
     }
 }
 func pluginPostProcessBlock(block *types.Block) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting PostProcessBlock, but default PluginLoader has not been initialized")
         return
     }
     PluginPostProcessBlock(plugins.DefaultPluginLoader, block)
 }


 func PluginNewHead(pl *plugins.PluginLoader, block *types.Block, hash common.Hash, logs []*types.Log, td *big.Int) {
     fnList := pl.Lookup("NewHead", func(item interface{}) bool {
         _, ok := item.(func([]byte, core.Hash, [][]byte, *big.Int))
         return ok
     })
     blockBytes, _ := rlp.EncodeToBytes(block)
     logBytes := make([][]byte, len(logs))
     for i, l := range logs {
         logBytes[i], _ = rlp.EncodeToBytes(l)
     }
     for _, fni := range fnList {
         if fn, ok := fni.(func([]byte, core.Hash, [][]byte, *big.Int)); ok {
             fn(blockBytes, core.Hash(hash), logBytes, td)
         }
     }
 }
 func pluginNewHead(block *types.Block, hash common.Hash, logs []*types.Log, td *big.Int) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting NewHead, but default PluginLoader has not been initialized")
         return
     }
     PluginNewHead(plugins.DefaultPluginLoader, block, hash, logs, td)
 }

 func PluginNewSideBlock(pl *plugins.PluginLoader, block *types.Block, hash common.Hash, logs []*types.Log) {
     fnList := pl.Lookup("NewSideBlock", func(item interface{}) bool {
         _, ok := item.(func([]byte, core.Hash, [][]byte))
         return ok
     })
     blockBytes, _ := rlp.EncodeToBytes(block)
     logBytes := make([][]byte, len(logs))
     for i, l := range logs {
         logBytes[i], _ = rlp.EncodeToBytes(l)
     }
     for _, fni := range fnList {
         if fn, ok := fni.(func([]byte, core.Hash, [][]byte)); ok {
             fn(blockBytes, core.Hash(hash), logBytes)
         }
     }
 }
 func pluginNewSideBlock(block *types.Block, hash common.Hash, logs []*types.Log) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting NewSideBlock, but default PluginLoader has not been initialized")
         return
     }
     PluginNewSideBlock(plugins.DefaultPluginLoader, block, hash, logs)
 }

 func PluginReorg(pl *plugins.PluginLoader, commonBlock *types.Block, oldChain, newChain types.Blocks) {
     fnList := pl.Lookup("Reorg", func(item interface{}) bool {
         _, ok := item.(func(core.Hash, []core.Hash, []core.Hash))
         return ok
     })
     oldChainHashes := make([]core.Hash, len(oldChain))
     for i, block := range oldChain {
         oldChainHashes[i] = core.Hash(block.Hash())
     }
     newChainHashes := make([]core.Hash, len(newChain))
     for i, block := range newChain {
         newChainHashes[i] = core.Hash(block.Hash())
     }
     for _, fni := range fnList {
         if fn, ok := fni.(func(core.Hash, []core.Hash, []core.Hash)); ok {
             fn(core.Hash(commonBlock.Hash()), oldChainHashes, newChainHashes)
         }
     }
 }
 func pluginReorg(commonBlock *types.Block, oldChain, newChain types.Blocks) {
     if plugins.DefaultPluginLoader == nil {
         log.Warn("Attempting Reorg, but default PluginLoader has not been initialized")
         return
     }
     PluginReorg(plugins.DefaultPluginLoader, commonBlock, oldChain, newChain)
+}
+
+type metaTracer struct {
+    tracers []core.BlockTracer
+}
+
+func (mt *metaTracer) PreProcessBlock(block *types.Block) {
+    blockHash := core.Hash(block.Hash())
+    blockNumber := block.NumberU64()
+    encoded, _ := rlp.EncodeToBytes(block)
+    for _, tracer := range mt.tracers {
+        tracer.PreProcessBlock(blockHash, blockNumber, encoded)
+    }
+}
+func (mt *metaTracer) PreProcessTransaction(tx *types.Transaction, block *types.Block, i int) {
+    blockHash := core.Hash(block.Hash())
+    transactionHash := core.Hash(tx.Hash())
+    for _, tracer := range mt.tracers {
+        tracer.PreProcessTransaction(transactionHash, blockHash, i)
+    }
+}
+func (mt *metaTracer) BlockProcessingError(tx *types.Transaction, block *types.Block, err error) {
+    blockHash := core.Hash(block.Hash())
+    transactionHash := core.Hash(tx.Hash())
+    for _, tracer := range mt.tracers {
+        tracer.BlockProcessingError(transactionHash, blockHash, err)
+    }
+}
+func (mt *metaTracer) PostProcessTransaction(tx *types.Transaction, block *types.Block, i int, receipt *types.Receipt) {
+    blockHash := core.Hash(block.Hash())
+    transactionHash := core.Hash(tx.Hash())
+    receiptBytes, _ := json.Marshal(receipt)
+    for _, tracer := range mt.tracers {
+        tracer.PostProcessTransaction(transactionHash, blockHash, i, receiptBytes)
+    }
+}
+func (mt *metaTracer) PostProcessBlock(block *types.Block) {
+    blockHash := core.Hash(block.Hash())
+    for _, tracer := range mt.tracers {
+        tracer.PostProcessBlock(blockHash)
+    }
+}
+func (mt *metaTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureStart(core.Address(from), core.Address(to), create, input, gas, value)
+    }
+}
+func (mt *metaTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureState(pc, core.OpCode(op), gas, cost, wrappers.NewWrappedScopeContext(scope), rData, depth, err)
+    }
+}
+func (mt *metaTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureFault(pc, core.OpCode(op), gas, cost, wrappers.NewWrappedScopeContext(scope), depth, err)
+    }
+}
+func (mt *metaTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureEnd(output, gasUsed, t, err)
+    }
+}
+
+func (mt *metaTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureEnter(core.OpCode(typ), core.Address(from), core.Address(to), input, gas, value)
+    }
+}
+
+func (mt *metaTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+    for _, tracer := range mt.tracers {
+        tracer.CaptureExit(output, gasUsed, err)
+    }
+}
+
+func PluginGetBlockTracer(pl *plugins.PluginLoader, hash common.Hash, statedb *state.StateDB) *metaTracer {
+    //look for a function that takes whatever the ctx provides and statedb and returns a core.blocktracer append into meta tracer
+    tracerList := plugins.Lookup("GetLiveTracer", func(item interface{}) bool {
+        _, ok := item.(func(core.Hash, core.StateDB) core.BlockTracer)
+        log.Info("Item is LiveTracer", "ok", ok, "type", reflect.TypeOf(item))
+        return ok
+    })
+    mt := &metaTracer{tracers: []core.BlockTracer{}}
+    for _, tracer := range tracerList {
+        if v, ok := tracer.(func(core.Hash, core.StateDB) core.BlockTracer); ok {
+            bt := v(core.Hash(hash), wrappers.NewWrappedStateDB(statedb))
+            if bt != nil {
+                mt.tracers = append(mt.tracers, bt)
+            }
+        }
+    }
+    return mt
+}
+func pluginGetBlockTracer(hash common.Hash, statedb *state.StateDB) *metaTracer {
+    if plugins.DefaultPluginLoader == nil {
+        log.Warn("Attempting GetBlockTracer, but default PluginLoader has not been initialized")
+        return &metaTracer{}
+    }
+    return PluginGetBlockTracer(plugins.DefaultPluginLoader, hash, statedb)
 }
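For reference, the GetLiveTracer lookup introduced above expects loaded plugins to expose a symbol named GetLiveTracer with the signature func(core.Hash, core.StateDB) core.BlockTracer. The sketch below is a rough, hypothetical illustration of the plugin side of that contract; it is not part of this diff. The method set is inferred from the calls metaTracer makes, the core.ScopeContext parameter name is an assumption, and the real core.BlockTracer interface in plugeth-utils may require additional methods, so treat this as a shape rather than a drop-in implementation.

// Hypothetical plugin-side sketch (not part of this change).
// Built with: go build -buildmode=plugin
package main

import (
    "math/big"
    "time"

    "github.com/openrelayxyz/plugeth-utils/core"
)

// simpleTracer is a no-op tracer; a real plugin would record or emit the data
// it receives from these hooks.
type simpleTracer struct {
    stateDB core.StateDB
}

// GetLiveTracer is the symbol the PluginLoader looks up. The signature matches
// the type assertion in PluginGetBlockTracer above (assumption: the loader
// resolves exported package-level symbols by name).
func GetLiveTracer(h core.Hash, db core.StateDB) core.BlockTracer {
    return &simpleTracer{stateDB: db}
}

// Block-level hooks, matching the calls made by metaTracer above.
func (t *simpleTracer) PreProcessBlock(hash core.Hash, number uint64, encoded []byte)               {}
func (t *simpleTracer) PreProcessTransaction(tx core.Hash, block core.Hash, i int)                  {}
func (t *simpleTracer) BlockProcessingError(tx core.Hash, block core.Hash, err error)               {}
func (t *simpleTracer) PostProcessTransaction(tx core.Hash, block core.Hash, i int, receipt []byte) {}
func (t *simpleTracer) PostProcessBlock(hash core.Hash)                                             {}

// EVM-level hooks, matching the Capture* calls above. core.ScopeContext is an
// assumed name for the interface satisfied by wrappers.NewWrappedScopeContext.
func (t *simpleTracer) CaptureStart(from, to core.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (t *simpleTracer) CaptureState(pc uint64, op core.OpCode, gas, cost uint64, scope core.ScopeContext, rData []byte, depth int, err error) {
}
func (t *simpleTracer) CaptureFault(pc uint64, op core.OpCode, gas, cost uint64, scope core.ScopeContext, depth int, err error) {
}
func (t *simpleTracer) CaptureEnd(output []byte, gasUsed uint64, d time.Duration, err error) {}
func (t *simpleTracer) CaptureEnter(typ core.OpCode, from, to core.Address, input []byte, gas uint64, value *big.Int) {
}
func (t *simpleTracer) CaptureExit(output []byte, gasUsed uint64, err error) {}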
@@ -71,29 +71,38 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
         misc.ApplyDAOHardFork(statedb)
     }
     blockContext := NewEVMBlockContext(header, p.bc, nil)
+    blockTracer := pluginGetBlockTracer(header.Hash(), statedb)
+    cfg.Tracer = blockTracer
+    cfg.Debug = true
     vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
     // Iterate over and process the individual transactions
     pluginPreProcessBlock(block)
+    blockTracer.PreProcessBlock(block)
     for i, tx := range block.Transactions() {
         msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number), header.BaseFee)
         if err != nil {
             pluginBlockProcessingError(tx, block, err)
+            blockTracer.BlockProcessingError(tx, block, err)
             return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
         }
         statedb.Prepare(tx.Hash(), i)
         pluginPreProcessTransaction(tx, block, i)
+        blockTracer.PreProcessTransaction(tx, block, i)
         receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv)
         if err != nil {
             pluginBlockProcessingError(tx, block, err)
+            blockTracer.BlockProcessingError(tx, block, err)
             return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
         }
         pluginPostProcessTransaction(tx, block, i, receipt)
+        blockTracer.PostProcessTransaction(tx, block, i, receipt)
         receipts = append(receipts, receipt)
         allLogs = append(allLogs, receipt.Logs...)
     }
     // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
     p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles())
     pluginPostProcessBlock(block)
+    blockTracer.PostProcessBlock(block)
     return receipts, allLogs, *usedGas, nil
 }

@@ -188,7 +188,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
             Preimages:           config.Preimages,
         }
     )
-    pluginUpdateBlockchainVMConfig(&vmConfig)
     eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
     if err != nil {
         return nil, err
@@ -1,81 +0,0 @@
-package eth
-
-import (
-    "math/big"
-    "reflect"
-    "time"
-
-    "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core/vm"
-    "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/plugins"
-    "github.com/ethereum/go-ethereum/plugins/wrappers"
-    "github.com/openrelayxyz/plugeth-utils/core"
-)
-
-type metaTracer struct {
-    tracers []core.TracerResult
-}
-
-func (mt *metaTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureStart(core.Address(from), core.Address(to), create, input, gas, value)
-    }
-}
-func (mt *metaTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureState(pc, core.OpCode(op), gas, cost, wrappers.NewWrappedScopeContext(scope), rData, depth, err)
-    }
-}
-func (mt *metaTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureEnter(core.OpCode(typ), core.Address(from), core.Address(to), input, gas, value)
-    }
-}
-func (mt *metaTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureExit(output, gasUsed, err)
-    }
-}
-func (mt *metaTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureFault(pc, core.OpCode(op), gas, cost, wrappers.NewWrappedScopeContext(scope), depth, err)
-    }
-}
-func (mt *metaTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
-    for _, tracer := range mt.tracers {
-        tracer.CaptureEnd(output, gasUsed, t, err)
-    }
-}
-
-func PluginUpdateBlockchainVMConfig(pl *plugins.PluginLoader, cfg *vm.Config) {
-    tracerList := plugins.Lookup("LiveTracer", func(item interface{}) bool {
-        _, ok := item.(core.TracerResult)
-        log.Info("Item is LiveTracer", "ok", ok, "type", reflect.TypeOf(item))
-        return ok
-    })
-    if len(tracerList) > 0 {
-        mt := &metaTracer{tracers: []core.TracerResult{}}
-        for _, tracer := range tracerList {
-            if v, ok := tracer.(core.TracerResult); ok {
-                log.Info("LiveTracer registered")
-                mt.tracers = append(mt.tracers, v)
-            } else {
-                log.Info("Item is not tracer")
-            }
-        }
-        cfg.Debug = true
-        cfg.Tracer = mt
-    } else {
-        log.Warn("Module is not tracer")
-    }
-
-}
-
-func pluginUpdateBlockchainVMConfig(cfg *vm.Config) {
-    if plugins.DefaultPluginLoader == nil {
-        log.Warn("Attempting CreateConsensusEngine, but default PluginLoader has not been initialized")
-        return
-    }
-    PluginUpdateBlockchainVMConfig(plugins.DefaultPluginLoader, cfg)
-}
go.mod (2 changes)
@@ -51,7 +51,7 @@ require (
     github.com/naoina/go-stringutil v0.1.0 // indirect
     github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
     github.com/olekukonko/tablewriter v0.0.5
-    github.com/openrelayxyz/plugeth-utils v0.0.9
+    github.com/openrelayxyz/plugeth-utils v0.0.10
     github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
     github.com/prometheus/tsdb v0.7.1
     github.com/rjeczalik/notify v0.9.1
go.sum (2 changes)
@@ -341,6 +341,8 @@ github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/openrelayxyz/plugeth-utils v0.0.9 h1:Rz0nwzirHSpGa6TfTuPl9/0W0TSN5vsYW+DtQH8QLIc=
 github.com/openrelayxyz/plugeth-utils v0.0.9/go.mod h1:Lv47unyKJ3b/PVbVAt9Uk+RQmpdrzDOsjSCPhAMQAps=
+github.com/openrelayxyz/plugeth-utils v0.0.10 h1:Aw1wiQUepHH9yytOM8+RlSj9Z3OU+OsegoPym7SLdic=
+github.com/openrelayxyz/plugeth-utils v0.0.10/go.mod h1:Lv47unyKJ3b/PVbVAt9Uk+RQmpdrzDOsjSCPhAMQAps=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
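Aside on the dependency bump: a change of this shape is typically produced by running something like go get github.com/openrelayxyz/plugeth-utils@v0.0.10 followed by go mod tidy, which rewrites the require line in go.mod and appends the new h1:/go.mod hash pair to go.sum while the existing v0.0.9 entries remain; the exact commands used for this commit are not recorded here.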
plugins/wrappers/backendwrapper/backendwrapper.go (new file, 422 lines)

package backendwrapper

import (
    "context"
    "encoding/json"
    "fmt"
    "math/big"
    "reflect"
    "sync"

    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    gcore "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/internal/ethapi"
    "github.com/ethereum/go-ethereum/log"

    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/openrelayxyz/plugeth-utils/core"
    "github.com/openrelayxyz/plugeth-utils/restricted"
    "github.com/openrelayxyz/plugeth-utils/restricted/params"
)

type Backend struct {
    b               ethapi.Backend
    newTxsFeed      event.Feed
    newTxsOnce      sync.Once
    chainFeed       event.Feed
    chainOnce       sync.Once
    chainHeadFeed   event.Feed
    chainHeadOnce   sync.Once
    chainSideFeed   event.Feed
    chainSideOnce   sync.Once
    logsFeed        event.Feed
    logsOnce        sync.Once
    pendingLogsFeed event.Feed
    pendingLogsOnce sync.Once
    removedLogsFeed event.Feed
    removedLogsOnce sync.Once
    chainConfig     *params.ChainConfig
}

func NewBackend(b ethapi.Backend) *Backend {
    return &Backend{b: b}
}

func (b *Backend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
    return b.b.SuggestGasTipCap(ctx)
}
func (b *Backend) ChainDb() restricted.Database {
    return &dbWrapper{b.b.ChainDb()}
}
func (b *Backend) ExtRPCEnabled() bool {
    return b.b.ExtRPCEnabled()
}
func (b *Backend) RPCGasCap() uint64 {
    return b.b.RPCGasCap()
}
func (b *Backend) RPCTxFeeCap() float64 {
    return b.b.RPCTxFeeCap()
}
func (b *Backend) UnprotectedAllowed() bool {
    return b.b.UnprotectedAllowed()
}
func (b *Backend) SetHead(number uint64) {
    b.b.SetHead(number)
}
func (b *Backend) HeaderByNumber(ctx context.Context, number int64) ([]byte, error) {
    header, err := b.b.HeaderByNumber(ctx, rpc.BlockNumber(number))
    if err != nil {
        return nil, err
    }
    return rlp.EncodeToBytes(header)
}
func (b *Backend) HeaderByHash(ctx context.Context, hash core.Hash) ([]byte, error) {
    header, err := b.b.HeaderByHash(ctx, common.Hash(hash))
    if err != nil {
        return nil, err
    }
    return rlp.EncodeToBytes(header)
}
func (b *Backend) CurrentHeader() []byte {
    ret, _ := rlp.EncodeToBytes(b.b.CurrentHeader())
    return ret
}
func (b *Backend) CurrentBlock() []byte {
    ret, _ := rlp.EncodeToBytes(b.b.CurrentBlock())
    return ret
}
func (b *Backend) BlockByNumber(ctx context.Context, number int64) ([]byte, error) {
    block, err := b.b.BlockByNumber(ctx, rpc.BlockNumber(number))
    if err != nil {
        return nil, err
    }
    return rlp.EncodeToBytes(block)
}
func (b *Backend) BlockByHash(ctx context.Context, hash core.Hash) ([]byte, error) {
    block, err := b.b.BlockByHash(ctx, common.Hash(hash))
    if err != nil {
        return nil, err
    }
    return rlp.EncodeToBytes(block)
}
func (b *Backend) GetReceipts(ctx context.Context, hash core.Hash) ([]byte, error) {
    receipts, err := b.b.GetReceipts(ctx, common.Hash(hash))
    if err != nil {
        return nil, err
    }
    return json.Marshal(receipts)
}
func (b *Backend) GetTd(ctx context.Context, hash core.Hash) *big.Int {
    return b.b.GetTd(ctx, common.Hash(hash))
}
func (b *Backend) SendTx(ctx context.Context, signedTx []byte) error {
    tx := new(types.Transaction)
    if err := tx.UnmarshalBinary(signedTx); err != nil {
        return err
    }
    return b.b.SendTx(ctx, tx)
}
func (b *Backend) GetTransaction(ctx context.Context, txHash core.Hash) ([]byte, core.Hash, uint64, uint64, error) { // RLP Encoded transaction {
    tx, blockHash, blockNumber, index, err := b.b.GetTransaction(ctx, common.Hash(txHash))
    if err != nil {
        return nil, core.Hash(blockHash), blockNumber, index, err
    }
    enc, err := tx.MarshalBinary()
    return enc, core.Hash(blockHash), blockNumber, index, err
}
func (b *Backend) GetPoolTransactions() ([][]byte, error) {
    txs, err := b.b.GetPoolTransactions()
    if err != nil {
        return nil, err
    }
    results := make([][]byte, len(txs))
    for i, tx := range txs {
        results[i], _ = rlp.EncodeToBytes(tx)
    }
    return results, nil
}
func (b *Backend) GetPoolTransaction(txHash core.Hash) []byte {
    tx := b.b.GetPoolTransaction(common.Hash(txHash))
    if tx == nil {
        return []byte{}
    }
    enc, _ := rlp.EncodeToBytes(tx)
    return enc
}
func (b *Backend) GetPoolNonce(ctx context.Context, addr core.Address) (uint64, error) {
    return b.b.GetPoolNonce(ctx, common.Address(addr))
}
func (b *Backend) Stats() (pending int, queued int) {
    return b.b.Stats()
}
func (b *Backend) TxPoolContent() (map[core.Address][][]byte, map[core.Address][][]byte) {
    pending, queued := b.b.TxPoolContent()
    trpending, trqueued := make(map[core.Address][][]byte), make(map[core.Address][][]byte)
    for k, v := range pending {
        trpending[core.Address(k)] = make([][]byte, len(v))
        for i, tx := range v {
            trpending[core.Address(k)][i], _ = tx.MarshalBinary()
        }
    }
    for k, v := range queued {
        trqueued[core.Address(k)] = make([][]byte, len(v))
        for i, tx := range v {
            trpending[core.Address(k)][i], _ = tx.MarshalBinary()
        }
    }
    return trpending, trqueued
} // RLP encoded transactions
func (b *Backend) BloomStatus() (uint64, uint64) {
    return b.b.BloomStatus()
}
func (b *Backend) GetLogs(ctx context.Context, blockHash core.Hash) ([][]byte, error) {
    logs, err := b.b.GetLogs(ctx, common.Hash(blockHash))
    if err != nil {
        return nil, err
    }
    encLogs := make([][]byte, len(logs))
    for i, log := range logs {
        encLogs[i], _ = rlp.EncodeToBytes(log)
    }
    return encLogs, nil
} // []RLP encoded logs

type dli interface {
    SyncProgress() ethereum.SyncProgress
}

type dl struct {
    dl dli
}

type progress struct {
    p ethereum.SyncProgress
}

func (p *progress) StartingBlock() uint64 {
    return p.p.StartingBlock
}
func (p *progress) CurrentBlock() uint64 {
    return p.p.CurrentBlock
}
func (p *progress) HighestBlock() uint64 {
    return p.p.HighestBlock
}
func (p *progress) PulledStates() uint64 {
    return p.p.PulledStates
}
func (p *progress) KnownStates() uint64 {
    return p.p.KnownStates
}

func (d *dl) Progress() core.Progress {
    return &progress{d.dl.SyncProgress()}
}

func (b *Backend) Downloader() core.Downloader {
    return &dl{b.b}
}

func (b *Backend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) core.Subscription {
    var sub event.Subscription
    b.newTxsOnce.Do(func() {
        bch := make(chan gcore.NewTxsEvent, 100)
        sub = b.b.SubscribeNewTxsEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    txe := core.NewTxsEvent{
                        Txs: make([][]byte, len(item.Txs)),
                    }
                    for i, tx := range item.Txs {
                        txe.Txs[i], _ = tx.MarshalBinary()
                    }
                    b.newTxsFeed.Send(txe)
                case err := <-sub.Err():
                    log.Warn("Subscription error for NewTxs", "err", err)
                    return
                }
            }
        }()
    })
    return b.newTxsFeed.Subscribe(ch)
}
func (b *Backend) SubscribeChainEvent(ch chan<- core.ChainEvent) core.Subscription {
    var sub event.Subscription
    b.chainOnce.Do(func() {
        bch := make(chan gcore.ChainEvent, 100)
        sub = b.b.SubscribeChainEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    ce := core.ChainEvent{
                        Hash: core.Hash(item.Hash),
                    }
                    ce.Block, _ = rlp.EncodeToBytes(item.Block)
                    ce.Logs, _ = rlp.EncodeToBytes(item.Logs)
                    b.chainFeed.Send(ce)
                case err := <-sub.Err():
                    log.Warn("Subscription error for Chain", "err", err)
                    return
                }
            }
        }()
    })
    return b.chainFeed.Subscribe(ch)
}
func (b *Backend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) core.Subscription {
    var sub event.Subscription
    b.chainHeadOnce.Do(func() {
        bch := make(chan gcore.ChainHeadEvent, 100)
        sub = b.b.SubscribeChainHeadEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    che := core.ChainHeadEvent{}
                    che.Block, _ = rlp.EncodeToBytes(item.Block)
                    b.chainHeadFeed.Send(che)
                case err := <-sub.Err():
                    log.Warn("Subscription error for ChainHead", "err", err)
                    return
                }
            }
        }()
    })
    return b.chainHeadFeed.Subscribe(ch)
}
func (b *Backend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) core.Subscription {
    var sub event.Subscription
    b.chainSideOnce.Do(func() {
        bch := make(chan gcore.ChainSideEvent, 100)
        sub = b.b.SubscribeChainSideEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    cse := core.ChainSideEvent{}
                    cse.Block, _ = rlp.EncodeToBytes(item.Block)
                    b.chainSideFeed.Send(cse)
                case err := <-sub.Err():
                    log.Warn("Subscription error for ChainSide", "err", err)
                    return
                }
            }
        }()
    })
    return b.chainSideFeed.Subscribe(ch)
}
func (b *Backend) SubscribeLogsEvent(ch chan<- [][]byte) core.Subscription {
    var sub event.Subscription
    b.logsOnce.Do(func() {
        bch := make(chan []*types.Log, 100)
        sub = b.b.SubscribeLogsEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    logs := make([][]byte, len(item))
                    for i, log := range item {
                        logs[i], _ = rlp.EncodeToBytes(log)
                    }
                    b.logsFeed.Send(logs)
                case err := <-sub.Err():
                    log.Warn("Subscription error for Logs", "err", err)
                    return
                }
            }
        }()
    })
    return b.logsFeed.Subscribe(ch)
} // []RLP encoded logs
func (b *Backend) SubscribePendingLogsEvent(ch chan<- [][]byte) core.Subscription {
    var sub event.Subscription
    b.pendingLogsOnce.Do(func() {
        bch := make(chan []*types.Log, 100)
        sub = b.b.SubscribePendingLogsEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    logs := make([][]byte, len(item))
                    for i, log := range item {
                        logs[i], _ = rlp.EncodeToBytes(log)
                    }
                    b.pendingLogsFeed.Send(logs)
                case err := <-sub.Err():
                    log.Warn("Subscription error for PendingLogs", "err", err)
                    return
                }
            }
        }()
    })
    return b.pendingLogsFeed.Subscribe(ch)
} // RLP Encoded logs
func (b *Backend) SubscribeRemovedLogsEvent(ch chan<- []byte) core.Subscription {
    var sub event.Subscription
    b.removedLogsOnce.Do(func() {
        bch := make(chan gcore.RemovedLogsEvent, 100)
        sub = b.b.SubscribeRemovedLogsEvent(bch)
        go func() {
            for {
                select {
                case item := <-bch:
                    logs := make([][]byte, len(item.Logs))
                    for i, log := range item.Logs {
                        logs[i], _ = rlp.EncodeToBytes(log)
                    }
                    b.removedLogsFeed.Send(item)
                case err := <-sub.Err():
                    log.Warn("Subscription error for RemovedLogs", "err", err)
                    return
                }
            }
        }()
    })
    return b.removedLogsFeed.Subscribe(ch)
} // RLP encoded logs

func convertAndSet(a, b reflect.Value) (err error) {
    defer func() {
        if recover() != nil {
            fmt.Errorf("error converting: %v", err.Error())
        }
    }()
    a.Set(b.Convert(a.Type()))
    return nil
}

func (b *Backend) ChainConfig() *params.ChainConfig {
    // We're using the reflect library to copy data from params.ChainConfig to
    // pparams.ChainConfig, so this function shouldn't need to be touched for
    // simple changes to ChainConfig (though pparams.ChainConfig may need to be
    // updated). Note that this probably won't carry over consensus engine data.
    if b.chainConfig != nil {
        return b.chainConfig
    }
    b.chainConfig = &params.ChainConfig{}
    nval := reflect.ValueOf(b.b.ChainConfig())
    ntype := nval.Elem().Type()
    lval := reflect.ValueOf(b.chainConfig)
    for i := 0; i < nval.Elem().NumField(); i++ {
        field := ntype.Field(i)
        v := nval.Elem().FieldByName(field.Name)
        lv := lval.Elem().FieldByName(field.Name)
        log.Info("Checking value for", "field", field.Name)
        if lv.Kind() != reflect.Invalid {
            // If core.ChainConfig doesn't have this field, skip it.
            if v.Type() == lv.Type() && lv.CanSet() {
                lv.Set(v)
            } else {
                convertAndSet(lv, v)
            }
        }
    }
    return b.chainConfig
}
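All of the Subscribe* methods above share one pattern: a sync.Once lazily starts a single bridge goroutine that drains the geth-typed channel, re-encodes each event into bytes, and republishes it on an event.Feed so any number of plugin subscribers can attach later. The following is a minimal, self-contained sketch of that pattern only; the sourceEvent/wireEvent types and the bridge name are hypothetical stand-ins, not plugeth or backendwrapper APIs.

package main

import (
    "fmt"
    "sync"

    "github.com/ethereum/go-ethereum/event"
)

// sourceEvent stands in for a geth-internal event type; wireEvent stands in
// for the byte-oriented type handed to plugins. Both are hypothetical.
type sourceEvent struct{ payload string }
type wireEvent struct{ Payload []byte }

type bridge struct {
    once sync.Once
    feed event.Feed
    src  chan sourceEvent
}

// Subscribe lazily starts one goroutine that converts source events and fans
// them out to every subscriber via the feed, mirroring the Backend code above.
func (b *bridge) Subscribe(ch chan<- wireEvent) event.Subscription {
    b.once.Do(func() {
        go func() {
            for item := range b.src {
                b.feed.Send(wireEvent{Payload: []byte(item.payload)})
            }
        }()
    })
    return b.feed.Subscribe(ch)
}

func main() {
    b := &bridge{src: make(chan sourceEvent, 10)}
    out := make(chan wireEvent, 10)
    sub := b.Subscribe(out)
    defer sub.Unsubscribe()

    b.src <- sourceEvent{payload: "block"}
    fmt.Println(string((<-out).Payload)) // prints "block"
}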
plugins/wrappers/backendwrapper/dbwrapper.go (new file, 49 lines)

package backendwrapper

import (
    "fmt"

    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/openrelayxyz/plugeth-utils/restricted"
)

type dbWrapper struct {
    db ethdb.Database
}

func (d *dbWrapper) Has(key []byte) (bool, error)             { return d.db.Has(key) }
func (d *dbWrapper) Get(key []byte) ([]byte, error)           { return d.db.Get(key) }
func (d *dbWrapper) Put(key []byte, value []byte) error       { return d.db.Put(key, value) }
func (d *dbWrapper) Delete(key []byte) error                  { return d.db.Delete(key) }
func (d *dbWrapper) Stat(property string) (string, error)     { return d.db.Stat(property) }
func (d *dbWrapper) Compact(start []byte, limit []byte) error { return d.db.Compact(start, limit) }
func (d *dbWrapper) HasAncient(kind string, number uint64) (bool, error) {
    return d.db.HasAncient(kind, number)
}
func (d *dbWrapper) Ancient(kind string, number uint64) ([]byte, error) {
    return d.db.Ancient(kind, number)
}
func (d *dbWrapper) Ancients() (uint64, error)               { return d.db.Ancients() }
func (d *dbWrapper) AncientSize(kind string) (uint64, error) { return d.db.AncientSize(kind) }
func (d *dbWrapper) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error {
    return fmt.Errorf("AppendAncient is no longer supported in geth 1.10.9 and above. Use ModifyAncients instead.")
}
func (d *dbWrapper) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
    return d.db.ModifyAncients(fn)
}
func (d *dbWrapper) TruncateAncients(n uint64) error { return d.db.TruncateAncients(n) }
func (d *dbWrapper) Sync() error                     { return d.db.Sync() }
func (d *dbWrapper) Close() error                    { return d.db.Close() }
func (d *dbWrapper) NewIterator(prefix []byte, start []byte) restricted.Iterator {
    return &iterWrapper{d.db.NewIterator(prefix, start)}
}

type iterWrapper struct {
    iter ethdb.Iterator
}

func (it *iterWrapper) Next() bool    { return it.iter.Next() }
func (it *iterWrapper) Error() error  { return it.iter.Error() }
func (it *iterWrapper) Key() []byte   { return it.iter.Key() }
func (it *iterWrapper) Value() []byte { return it.iter.Value() }
func (it *iterWrapper) Release()      { it.iter.Release() }
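Since dbWrapper forwards each ethdb method one-to-one, the part most plugin authors will actually touch is the restricted.Iterator returned by NewIterator. The helper below is a hypothetical usage sketch; the package and function names are mine, and it assumes a plugin reaches this database through the wrapped backend's ChainDb(), which this diff does not show.

package exampleplugin

import (
    "github.com/openrelayxyz/plugeth-utils/restricted"
)

// countKeys walks every key under prefix using the Iterator methods wrapped
// above (Next/Key/Value/Error/Release) and returns how many entries it saw.
func countKeys(db restricted.Database, prefix []byte) (int, error) {
    it := db.NewIterator(prefix, nil) // nil start: begin at the first key under prefix
    defer it.Release()                // always release the underlying ethdb iterator

    n := 0
    for it.Next() {
        _ = it.Key()   // key bytes for the current entry
        _ = it.Value() // value bytes for the current entry
        n++
    }
    return n, it.Error() // surface any error the iteration hit
}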
@@ -1,38 +0,0 @@
-package wrappers
-
-import (
-    "fmt"
-    "github.com/ethereum/go-ethereum/ethdb"
-    "github.com/openrelayxyz/plugeth-utils/restricted"
-)
-
-type dbWrapper struct{
-    db ethdb.Database
-}
-
-func (d *dbWrapper) Has(key []byte) (bool, error) { return d.db.Has(key) }
-func (d *dbWrapper) Get(key []byte) ([]byte, error) { return d.db.Get(key) }
-func (d *dbWrapper) Put(key []byte, value []byte) error { return d.db.Put(key, value) }
-func (d *dbWrapper) Delete(key []byte) error { return d.db.Delete(key) }
-func (d *dbWrapper) Stat(property string) (string, error) { return d.db.Stat(property) }
-func (d *dbWrapper) Compact(start []byte, limit []byte) error { return d.db.Compact(start, limit) }
-func (d *dbWrapper) HasAncient(kind string, number uint64) (bool, error) { return d.db.HasAncient(kind, number) }
-func (d *dbWrapper) Ancient(kind string, number uint64) ([]byte, error) { return d.db.Ancient(kind, number) }
-func (d *dbWrapper) Ancients() (uint64, error) { return d.db.Ancients() }
-func (d *dbWrapper) AncientSize(kind string) (uint64, error) { return d.db.AncientSize(kind) }
-func (d *dbWrapper) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error { return fmt.Errorf("AppendAncient is no longer supported in geth 1.10.9 and above. Use ModifyAncients instead.") }
-func (d *dbWrapper) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { return d.db.ModifyAncients(fn) }
-func (d *dbWrapper) TruncateAncients(n uint64) error { return d.db.TruncateAncients(n) }
-func (d *dbWrapper) Sync() error { return d.db.Sync() }
-func (d *dbWrapper) Close() error { return d.db.Close() }
-func (d *dbWrapper) NewIterator(prefix []byte, start []byte) restricted.Iterator { return &iterWrapper{d.db.NewIterator(prefix, start)} }
-
-type iterWrapper struct {
-    iter ethdb.Iterator
-}
-
-func (it *iterWrapper) Next() bool { return it.iter.Next() }
-func (it *iterWrapper) Error() error { return it.iter.Error() }
-func (it *iterWrapper) Key() []byte { return it.iter.Key() }
-func (it *iterWrapper) Value() []byte { return it.iter.Value() }
-func (it *iterWrapper) Release() { it.iter.Release() }
@@ -1,31 +1,14 @@
 package wrappers

 import (
-    "context"
-    "encoding/json"
-    "fmt"
     "math/big"
-    "reflect"
-    "sync"
     "time"

-    "github.com/ethereum/go-ethereum"
     "github.com/ethereum/go-ethereum/common"
-    gcore "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/state"
-    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/core/vm"
-    "github.com/ethereum/go-ethereum/event"
-    "github.com/ethereum/go-ethereum/internal/ethapi"
-    "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/node"
-
-    // "github.com/ethereum/go-ethereum/plugins/interfaces"
-    "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/rpc"
     "github.com/openrelayxyz/plugeth-utils/core"
-    "github.com/openrelayxyz/plugeth-utils/restricted"
-    "github.com/openrelayxyz/plugeth-utils/restricted/params"
 )

 type WrappedScopeContext struct {
@ -223,401 +206,3 @@ func (n *Node) ResolvePath(x string) string {
|
|||||||
func (n *Node) Attach() (core.Client, error) {
|
func (n *Node) Attach() (core.Client, error) {
|
||||||
return n.n.Attach()
|
return n.n.Attach()
|
||||||
}
|
}
|
||||||
|
|
||||||
type Backend struct {
|
|
||||||
b ethapi.Backend
|
|
||||||
newTxsFeed event.Feed
|
|
||||||
newTxsOnce sync.Once
|
|
||||||
chainFeed event.Feed
|
|
||||||
chainOnce sync.Once
|
|
||||||
chainHeadFeed event.Feed
|
|
||||||
chainHeadOnce sync.Once
|
|
||||||
chainSideFeed event.Feed
|
|
||||||
chainSideOnce sync.Once
|
|
||||||
logsFeed event.Feed
|
|
||||||
logsOnce sync.Once
|
|
||||||
pendingLogsFeed event.Feed
|
|
||||||
pendingLogsOnce sync.Once
|
|
||||||
removedLogsFeed event.Feed
|
|
||||||
removedLogsOnce sync.Once
|
|
||||||
chainConfig *params.ChainConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBackend(b ethapi.Backend) *Backend {
|
|
||||||
return &Backend{b: b}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Backend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
|
|
||||||
return b.b.SuggestGasTipCap(ctx)
|
|
||||||
}
|
|
||||||
func (b *Backend) ChainDb() restricted.Database {
|
|
||||||
return &dbWrapper{b.b.ChainDb()}
|
|
||||||
}
|
|
||||||
func (b *Backend) ExtRPCEnabled() bool {
|
|
||||||
return b.b.ExtRPCEnabled()
|
|
||||||
}
|
|
||||||
func (b *Backend) RPCGasCap() uint64 {
|
|
||||||
return b.b.RPCGasCap()
|
|
||||||
}
|
|
||||||
func (b *Backend) RPCTxFeeCap() float64 {
|
|
||||||
return b.b.RPCTxFeeCap()
|
|
||||||
}
|
|
||||||
func (b *Backend) UnprotectedAllowed() bool {
|
|
||||||
return b.b.UnprotectedAllowed()
|
|
||||||
}
|
|
||||||
func (b *Backend) SetHead(number uint64) {
|
|
||||||
b.b.SetHead(number)
|
|
||||||
}
|
|
||||||
-func (b *Backend) HeaderByNumber(ctx context.Context, number int64) ([]byte, error) {
-    header, err := b.b.HeaderByNumber(ctx, rpc.BlockNumber(number))
-    if err != nil {
-        return nil, err
-    }
-    return rlp.EncodeToBytes(header)
-}
-func (b *Backend) HeaderByHash(ctx context.Context, hash core.Hash) ([]byte, error) {
-    header, err := b.b.HeaderByHash(ctx, common.Hash(hash))
-    if err != nil {
-        return nil, err
-    }
-    return rlp.EncodeToBytes(header)
-}
-func (b *Backend) CurrentHeader() []byte {
-    ret, _ := rlp.EncodeToBytes(b.b.CurrentHeader())
-    return ret
-}
-func (b *Backend) CurrentBlock() []byte {
-    ret, _ := rlp.EncodeToBytes(b.b.CurrentBlock())
-    return ret
-}
-func (b *Backend) BlockByNumber(ctx context.Context, number int64) ([]byte, error) {
-    block, err := b.b.BlockByNumber(ctx, rpc.BlockNumber(number))
-    if err != nil {
-        return nil, err
-    }
-    return rlp.EncodeToBytes(block)
-}
-func (b *Backend) BlockByHash(ctx context.Context, hash core.Hash) ([]byte, error) {
-    block, err := b.b.BlockByHash(ctx, common.Hash(hash))
-    if err != nil {
-        return nil, err
-    }
-    return rlp.EncodeToBytes(block)
-}
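
// Illustrative sketch, not part of the diff above: these accessors hand back
// RLP-encoded bytes rather than geth structs, so a consumer decodes them
// itself. The helper name latestHeader is an assumption for the example; it
// relies only on the rlp and types packages the original file imports.
func latestHeader(ctx context.Context, b *Backend) (*types.Header, error) {
    raw, err := b.HeaderByNumber(ctx, -1) // negative numbers are interpreted as rpc.BlockNumber sentinels (e.g. latest/pending)
    if err != nil {
        return nil, err
    }
    header := new(types.Header)
    if err := rlp.DecodeBytes(raw, header); err != nil {
        return nil, err
    }
    return header, nil
}
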
-func (b *Backend) GetReceipts(ctx context.Context, hash core.Hash) ([]byte, error) {
-    receipts, err := b.b.GetReceipts(ctx, common.Hash(hash))
-    if err != nil {
-        return nil, err
-    }
-    return json.Marshal(receipts)
-}
-func (b *Backend) GetTd(ctx context.Context, hash core.Hash) *big.Int {
-    return b.b.GetTd(ctx, common.Hash(hash))
-}
-func (b *Backend) SendTx(ctx context.Context, signedTx []byte) error {
-    tx := new(types.Transaction)
-    if err := tx.UnmarshalBinary(signedTx); err != nil {
-        return err
-    }
-    return b.b.SendTx(ctx, tx)
-}
-func (b *Backend) GetTransaction(ctx context.Context, txHash core.Hash) ([]byte, core.Hash, uint64, uint64, error) { // RLP encoded transaction
-    tx, blockHash, blockNumber, index, err := b.b.GetTransaction(ctx, common.Hash(txHash))
-    if err != nil {
-        return nil, core.Hash(blockHash), blockNumber, index, err
-    }
-    enc, err := tx.MarshalBinary()
-    return enc, core.Hash(blockHash), blockNumber, index, err
-}
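
// Illustrative sketch, not part of the diff above: SendTx expects the
// canonical binary encoding of an already-signed transaction, i.e. the
// counterpart of the MarshalBinary call used in GetTransaction. The helper
// name submitSigned is an assumption for the example.
func submitSigned(ctx context.Context, b *Backend, tx *types.Transaction) error {
    raw, err := tx.MarshalBinary() // the same encoding SendTx unmarshals on the way in
    if err != nil {
        return err
    }
    return b.SendTx(ctx, raw)
}
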
-func (b *Backend) GetPoolTransactions() ([][]byte, error) {
-    txs, err := b.b.GetPoolTransactions()
-    if err != nil {
-        return nil, err
-    }
-    results := make([][]byte, len(txs))
-    for i, tx := range txs {
-        results[i], _ = rlp.EncodeToBytes(tx)
-    }
-    return results, nil
-}
-func (b *Backend) GetPoolTransaction(txHash core.Hash) []byte {
-    tx := b.b.GetPoolTransaction(common.Hash(txHash))
-    if tx == nil {
-        return []byte{}
-    }
-    enc, _ := rlp.EncodeToBytes(tx)
-    return enc
-}
-func (b *Backend) GetPoolNonce(ctx context.Context, addr core.Address) (uint64, error) {
-    return b.b.GetPoolNonce(ctx, common.Address(addr))
-}
-func (b *Backend) Stats() (pending int, queued int) {
-    return b.b.Stats()
-}
-func (b *Backend) TxPoolContent() (map[core.Address][][]byte, map[core.Address][][]byte) {
-    pending, queued := b.b.TxPoolContent()
-    trpending, trqueued := make(map[core.Address][][]byte), make(map[core.Address][][]byte)
-    for k, v := range pending {
-        trpending[core.Address(k)] = make([][]byte, len(v))
-        for i, tx := range v {
-            trpending[core.Address(k)][i], _ = tx.MarshalBinary()
-        }
-    }
-    for k, v := range queued {
-        trqueued[core.Address(k)] = make([][]byte, len(v))
-        for i, tx := range v {
-            trqueued[core.Address(k)][i], _ = tx.MarshalBinary()
-        }
-    }
-    return trpending, trqueued
-} // RLP encoded transactions
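
// Illustrative sketch, not part of the diff above: TxPoolContent returns the
// pool grouped by sender, with every transaction in its binary
// (MarshalBinary) encoding, so a consumer walks the maps and unmarshals entry
// by entry. countPoolTxs is a hypothetical helper name.
func countPoolTxs(b *Backend) (n int) {
    pending, queued := b.TxPoolContent()
    for _, group := range []map[core.Address][][]byte{pending, queued} {
        for _, rawTxs := range group {
            for _, raw := range rawTxs {
                tx := new(types.Transaction)
                if err := tx.UnmarshalBinary(raw); err == nil {
                    n++
                }
            }
        }
    }
    return n
}
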
-func (b *Backend) BloomStatus() (uint64, uint64) {
-    return b.b.BloomStatus()
-}
-func (b *Backend) GetLogs(ctx context.Context, blockHash core.Hash) ([][]byte, error) {
-    logs, err := b.b.GetLogs(ctx, common.Hash(blockHash))
-    if err != nil {
-        return nil, err
-    }
-    encLogs := make([][]byte, len(logs))
-    for i, log := range logs {
-        encLogs[i], _ = rlp.EncodeToBytes(log)
-    }
-    return encLogs, nil
-} // []RLP encoded logs
-
-type dli interface {
-    SyncProgress() ethereum.SyncProgress
-}
-
-type dl struct {
-    dl dli
-}
-
-type progress struct {
-    p ethereum.SyncProgress
-}
-
-func (p *progress) StartingBlock() uint64 {
-    return p.p.StartingBlock
-}
-func (p *progress) CurrentBlock() uint64 {
-    return p.p.CurrentBlock
-}
-func (p *progress) HighestBlock() uint64 {
-    return p.p.HighestBlock
-}
-func (p *progress) PulledStates() uint64 {
-    return p.p.PulledStates
-}
-func (p *progress) KnownStates() uint64 {
-    return p.p.KnownStates
-}
-
-func (d *dl) Progress() core.Progress {
-    return &progress{d.dl.SyncProgress()}
-}
-
-func (b *Backend) Downloader() core.Downloader {
-    return &dl{b.b}
-}
-
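// Illustrative sketch, not part of the diff above: the dl and progress
// wrappers expose geth's SyncProgress snapshot through the core.Progress
// getters shown above, so a caller reads it as plain method calls. The
// helper name syncDelta is an assumption for the example.
func syncDelta(b *Backend) uint64 {
    p := b.Downloader().Progress() // snapshot of the underlying ethereum.SyncProgress
    return p.HighestBlock() - p.CurrentBlock()
}
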
-func (b *Backend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) core.Subscription {
-    var sub event.Subscription
-    b.newTxsOnce.Do(func() {
-        bch := make(chan gcore.NewTxsEvent, 100)
-        sub = b.b.SubscribeNewTxsEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    txe := core.NewTxsEvent{
-                        Txs: make([][]byte, len(item.Txs)),
-                    }
-                    for i, tx := range item.Txs {
-                        txe.Txs[i], _ = tx.MarshalBinary()
-                    }
-                    b.newTxsFeed.Send(txe)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for NewTxs", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.newTxsFeed.Subscribe(ch)
-}
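
// Illustrative sketch, not part of the diff above: every Subscribe* method
// opens the underlying geth subscription at most once (sync.Once), converts
// the events into byte-oriented plugeth values, and fans them out to any
// number of subscribers through an event.Feed. A consumer therefore just
// supplies its own channel and decodes what arrives. readNewTxs is a
// hypothetical helper, and it assumes core.Subscription exposes Unsubscribe
// in the same way event.Subscription does.
func readNewTxs(b *Backend) {
    ch := make(chan core.NewTxsEvent, 16)
    sub := b.SubscribeNewTxsEvent(ch)
    defer sub.Unsubscribe()
    for ev := range ch { // simplified loop; a real consumer would also watch sub.Err()
        for _, raw := range ev.Txs {
            tx := new(types.Transaction)
            if err := tx.UnmarshalBinary(raw); err != nil {
                continue // skip anything that fails to decode
            }
            log.Info("pending transaction observed through the wrapper", "hash", tx.Hash())
        }
    }
}
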
-func (b *Backend) SubscribeChainEvent(ch chan<- core.ChainEvent) core.Subscription {
-    var sub event.Subscription
-    b.chainOnce.Do(func() {
-        bch := make(chan gcore.ChainEvent, 100)
-        sub = b.b.SubscribeChainEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    ce := core.ChainEvent{
-                        Hash: core.Hash(item.Hash),
-                    }
-                    ce.Block, _ = rlp.EncodeToBytes(item.Block)
-                    ce.Logs, _ = rlp.EncodeToBytes(item.Logs)
-                    b.chainFeed.Send(ce)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for Chain", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.chainFeed.Subscribe(ch)
-}
-func (b *Backend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) core.Subscription {
-    var sub event.Subscription
-    b.chainHeadOnce.Do(func() {
-        bch := make(chan gcore.ChainHeadEvent, 100)
-        sub = b.b.SubscribeChainHeadEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    che := core.ChainHeadEvent{}
-                    che.Block, _ = rlp.EncodeToBytes(item.Block)
-                    b.chainHeadFeed.Send(che)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for ChainHead", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.chainHeadFeed.Subscribe(ch)
-}
-func (b *Backend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) core.Subscription {
-    var sub event.Subscription
-    b.chainSideOnce.Do(func() {
-        bch := make(chan gcore.ChainSideEvent, 100)
-        sub = b.b.SubscribeChainSideEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    cse := core.ChainSideEvent{}
-                    cse.Block, _ = rlp.EncodeToBytes(item.Block)
-                    b.chainSideFeed.Send(cse)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for ChainSide", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.chainSideFeed.Subscribe(ch)
-}
-func (b *Backend) SubscribeLogsEvent(ch chan<- [][]byte) core.Subscription {
-    var sub event.Subscription
-    b.logsOnce.Do(func() {
-        bch := make(chan []*types.Log, 100)
-        sub = b.b.SubscribeLogsEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    logs := make([][]byte, len(item))
-                    for i, log := range item {
-                        logs[i], _ = rlp.EncodeToBytes(log)
-                    }
-                    b.logsFeed.Send(logs)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for Logs", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.logsFeed.Subscribe(ch)
-} // []RLP encoded logs
-func (b *Backend) SubscribePendingLogsEvent(ch chan<- [][]byte) core.Subscription {
-    var sub event.Subscription
-    b.pendingLogsOnce.Do(func() {
-        bch := make(chan []*types.Log, 100)
-        sub = b.b.SubscribePendingLogsEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    logs := make([][]byte, len(item))
-                    for i, log := range item {
-                        logs[i], _ = rlp.EncodeToBytes(log)
-                    }
-                    b.pendingLogsFeed.Send(logs)
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for PendingLogs", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.pendingLogsFeed.Subscribe(ch)
-} // []RLP encoded logs
-func (b *Backend) SubscribeRemovedLogsEvent(ch chan<- []byte) core.Subscription {
-    var sub event.Subscription
-    b.removedLogsOnce.Do(func() {
-        bch := make(chan gcore.RemovedLogsEvent, 100)
-        sub = b.b.SubscribeRemovedLogsEvent(bch)
-        go func() {
-            for {
-                select {
-                case item := <-bch:
-                    logs := make([][]byte, len(item.Logs))
-                    for i, l := range item.Logs {
-                        logs[i], _ = rlp.EncodeToBytes(l)
-                    }
-                    // Send each RLP-encoded removed log so the payload matches
-                    // the []byte subscriber channel.
-                    for _, enc := range logs {
-                        b.removedLogsFeed.Send(enc)
-                    }
-                case err := <-sub.Err():
-                    log.Warn("Subscription error for RemovedLogs", "err", err)
-                    return
-                }
-            }
-        }()
-    })
-    return b.removedLogsFeed.Subscribe(ch)
-} // RLP encoded logs
-
-func convertAndSet(a, b reflect.Value) (err error) {
-    defer func() {
-        if r := recover(); r != nil {
-            err = fmt.Errorf("error converting: %v", r)
-        }
-    }()
-    a.Set(b.Convert(a.Type()))
-    return nil
-}
-
-func (b *Backend) ChainConfig() *params.ChainConfig {
-    // We use the reflect library to copy data from geth's params.ChainConfig
-    // into the plugeth-utils params.ChainConfig, so this function shouldn't
-    // need to be touched for simple changes to ChainConfig (though the
-    // plugeth-utils copy may need to be updated). Note that this probably
-    // won't carry over consensus engine data.
-    if b.chainConfig != nil {
-        return b.chainConfig
-    }
-    b.chainConfig = &params.ChainConfig{}
-    nval := reflect.ValueOf(b.b.ChainConfig())
-    ntype := nval.Elem().Type()
-    lval := reflect.ValueOf(b.chainConfig)
-    for i := 0; i < nval.Elem().NumField(); i++ {
-        field := ntype.Field(i)
-        v := nval.Elem().FieldByName(field.Name)
-        lv := lval.Elem().FieldByName(field.Name)
-        log.Info("Checking value for", "field", field.Name)
-        if lv.Kind() != reflect.Invalid {
-            // If the plugeth-utils ChainConfig doesn't have this field, skip it.
-            if v.Type() == lv.Type() && lv.CanSet() {
-                lv.Set(v)
-            } else {
-                convertAndSet(lv, v)
-            }
-        }
-    }
-    return b.chainConfig
-}
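
// Illustrative sketch, not part of the diff above: the reflect-based copy
// leans on reflect.Value.Convert to bridge fields whose types are distinct
// but convertible, which is the usual case when two packages declare
// structurally identical config structs. The types srcCfg, netID and dstCfg
// and the copyCfg helper are all hypothetical and only demonstrate that
// mechanism.
type srcCfg struct{ NetworkID uint64 }

type netID uint64

type dstCfg struct{ NetworkID netID }

func copyCfg(dst *dstCfg, src *srcCfg) {
    sv, dv := reflect.ValueOf(src).Elem(), reflect.ValueOf(dst).Elem()
    for i := 0; i < sv.NumField(); i++ {
        name := sv.Type().Field(i).Name
        if f := dv.FieldByName(name); f.IsValid() && f.CanSet() {
            f.Set(sv.Field(i).Convert(f.Type())) // Convert is a no-op when the types already match
        }
    }
}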