Merge pull request #3518 from fjl/ethclient-dependency-cleanup

core/types: dependency cleanup
Committed by Péter Szilágyi on 2017-01-06 15:42:03 +02:00 (via GitHub)
commit 8f9daaa3ba
45 changed files with 245 additions and 268 deletions
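The theme of the diff below is that the EVM log type now lives in core/types instead of core/vm, so receipts, filters and events no longer need to import the EVM package. As orientation, here is a minimal sketch of what downstream code looks like after the move; it only uses identifiers that appear in the hunks below (types.Log, types.NewReceipt, types.LogsBloom, types.BytesToBloom), and the gas value is made up:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

func main() {
    // Logs are now plain []*types.Log values; the old vm.Logs alias and
    // the vm.NewLog constructor are gone.
    logs := []*types.Log{
        {Address: common.BytesToAddress([]byte{0x11}), Data: []byte("hi")},
    }

    // Receipts reference the same type directly.
    receipt := types.NewReceipt(nil, big.NewInt(21000))
    receipt.Logs = logs
    receipt.Bloom = types.BytesToBloom(types.LogsBloom(receipt.Logs).Bytes())

    fmt.Println(len(receipt.Logs))
}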

View File

@@ -31,8 +31,8 @@ type Argument struct {
 func (a *Argument) UnmarshalJSON(data []byte) error {
     var extarg struct {
         Name    string
         Type    string
         Indexed bool
     }
     err := json.Unmarshal(data, &extarg)

View File

@@ -18,10 +18,10 @@
 package main
 import (
+    "encoding/hex"
     "fmt"
     "io/ioutil"
     "os"
-    "encoding/hex"
     "strings"
     "github.com/ethereum/go-ethereum/core/vm"

View File

@@ -337,10 +337,10 @@ var (
         Usage: "Network listening port",
         Value: 30303,
     }
-    BootnodesFlag = cli.StringFlag{
+    BootnodesFlag = cli.StringSliceFlag{
         Name:  "bootnodes",
         Usage: "Comma separated enode URLs for P2P discovery bootstrap",
-        Value: "",
+        Value: nil,
     }
     NodeKeyFileFlag = cli.StringFlag{
         Name: "nodekey",
@@ -485,17 +485,15 @@ func makeNodeUserIdent(ctx *cli.Context) string {
 // MakeBootstrapNodes creates a list of bootstrap nodes from the command line
 // flags, reverting to pre-configured ones if none have been specified.
 func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
-    // Return pre-configured nodes if none were manually requested
-    if !ctx.GlobalIsSet(BootnodesFlag.Name) {
-        if ctx.GlobalBool(TestNetFlag.Name) {
-            return params.TestnetBootnodes
-        }
-        return params.MainnetBootnodes
+    urls := params.MainnetBootnodes
+    if ctx.GlobalIsSet(BootnodesFlag.Name) {
+        urls = ctx.GlobalStringSlice(BootnodesFlag.Name)
+    } else if ctx.GlobalBool(TestNetFlag.Name) {
+        urls = params.TestnetBootnodes
     }
-    // Otherwise parse and use the CLI bootstrap nodes
-    bootnodes := []*discover.Node{}
-    for _, url := range strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") {
+    bootnodes := make([]*discover.Node, 0, len(urls))
+    for _, url := range urls {
         node, err := discover.ParseNode(url)
         if err != nil {
             glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
@@ -509,14 +507,13 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
 // MakeBootstrapNodesV5 creates a list of bootstrap nodes from the command line
 // flags, reverting to pre-configured ones if none have been specified.
 func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node {
-    // Return pre-configured nodes if none were manually requested
-    if !ctx.GlobalIsSet(BootnodesFlag.Name) {
-        return params.DiscoveryV5Bootnodes
+    urls := params.DiscoveryV5Bootnodes
+    if ctx.GlobalIsSet(BootnodesFlag.Name) {
+        urls = ctx.GlobalStringSlice(BootnodesFlag.Name)
     }
-    // Otherwise parse and use the CLI bootstrap nodes
-    bootnodes := []*discv5.Node{}
-    for _, url := range strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") {
+    bootnodes := make([]*discv5.Node, 0, len(urls))
+    for _, url := range urls {
         node, err := discv5.ParseNode(url)
         if err != nil {
             glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
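For readers skimming the flag change above: the helpers now start from a default URL list, replace it wholesale when --bootnodes was given, and parse each URL on its own. A hedged, stand-alone sketch of that pattern (the helper name and its arguments are invented for illustration; discover.ParseNode and the params bootnode lists are the identifiers the real code uses):

package example

import "github.com/ethereum/go-ethereum/p2p/discover"

// chooseBootnodes mirrors the selection logic of MakeBootstrapNodes:
// defaults first, CLI override second, then per-URL parsing with invalid
// entries skipped (the real code logs them via glog instead).
func chooseBootnodes(flagSet bool, flagURLs, defaults []string) []*discover.Node {
    urls := defaults
    if flagSet {
        urls = flagURLs
    }
    nodes := make([]*discover.Node, 0, len(urls))
    for _, url := range urls {
        node, err := discover.ParseNode(url)
        if err != nil {
            continue
        }
        nodes = append(nodes, node)
    }
    return nodes
}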

View File

@@ -24,7 +24,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/params"
@@ -77,7 +76,7 @@ func TestPutReceipt(t *testing.T) {
     hash[0] = 2
     receipt := new(types.Receipt)
-    receipt.Logs = vm.Logs{&vm.Log{
+    receipt.Logs = []*types.Log{{
         Address: addr,
         Topics:  []common.Hash{hash},
         Data:    []byte("hi"),

View File

@@ -883,7 +883,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
     var (
         stats         = insertStats{startTime: time.Now()}
         events        = make([]interface{}, 0, len(chain))
-        coalescedLogs vm.Logs
+        coalescedLogs []*types.Log
         nonceChecked  = make([]bool, len(chain))
     )
@@ -1094,7 +1094,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
         oldStart    = oldBlock
         newStart    = newBlock
         deletedTxs  types.Transactions
-        deletedLogs vm.Logs
+        deletedLogs []*types.Log
         // collectLogs collects the logs that were generated during the
         // processing of the block that corresponds with the given hash.
         // These logs are later announced as deleted.
@@ -1210,7 +1210,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 // postChainEvents iterates over the events generated by a chain insertion and
 // posts them into the event mux.
-func (self *BlockChain) postChainEvents(events []interface{}, logs vm.Logs) {
+func (self *BlockChain) postChainEvents(events []interface{}, logs []*types.Log) {
     // post event logs for further processing
     self.eventMux.Post(logs)
     for _, event := range events {

View File

@@ -435,7 +435,7 @@ func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return n
 func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
     return nil
 }
-func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
+func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error) {
     return nil, nil, new(big.Int), nil
 }

View File

@@ -26,7 +26,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/crypto/sha3"
     "github.com/ethereum/go-ethereum/ethdb"
@@ -393,9 +392,9 @@ func TestReceiptStorage(t *testing.T) {
     receipt1 := &types.Receipt{
         PostState:         []byte{0x01},
         CumulativeGasUsed: big.NewInt(1),
-        Logs: vm.Logs{
-            &vm.Log{Address: common.BytesToAddress([]byte{0x11})},
-            &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+        Logs: []*types.Log{
+            {Address: common.BytesToAddress([]byte{0x11})},
+            {Address: common.BytesToAddress([]byte{0x01, 0x11})},
         },
         TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
         ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
@@ -404,9 +403,9 @@ func TestReceiptStorage(t *testing.T) {
     receipt2 := &types.Receipt{
         PostState:         []byte{0x02},
         CumulativeGasUsed: big.NewInt(2),
-        Logs: vm.Logs{
-            &vm.Log{Address: common.BytesToAddress([]byte{0x22})},
-            &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+        Logs: []*types.Log{
+            {Address: common.BytesToAddress([]byte{0x22})},
+            {Address: common.BytesToAddress([]byte{0x02, 0x22})},
         },
         TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
         ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
@@ -452,9 +451,9 @@ func TestBlockReceiptStorage(t *testing.T) {
     receipt1 := &types.Receipt{
         PostState:         []byte{0x01},
         CumulativeGasUsed: big.NewInt(1),
-        Logs: vm.Logs{
-            &vm.Log{Address: common.BytesToAddress([]byte{0x11})},
-            &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+        Logs: []*types.Log{
+            {Address: common.BytesToAddress([]byte{0x11})},
+            {Address: common.BytesToAddress([]byte{0x01, 0x11})},
         },
         TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
         ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
@@ -463,9 +462,9 @@ func TestBlockReceiptStorage(t *testing.T) {
     receipt2 := &types.Receipt{
         PostState:         []byte{0x02},
         CumulativeGasUsed: big.NewInt(2),
-        Logs: vm.Logs{
-            &vm.Log{Address: common.BytesToAddress([]byte{0x22})},
-            &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+        Logs: []*types.Log{
+            {Address: common.BytesToAddress([]byte{0x22})},
+            {Address: common.BytesToAddress([]byte{0x02, 0x22})},
         },
         TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
         ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
@@ -505,14 +504,14 @@ func TestMipmapBloom(t *testing.T) {
     db, _ := ethdb.NewMemDatabase()
     receipt1 := new(types.Receipt)
-    receipt1.Logs = vm.Logs{
-        &vm.Log{Address: common.BytesToAddress([]byte("test"))},
-        &vm.Log{Address: common.BytesToAddress([]byte("address"))},
+    receipt1.Logs = []*types.Log{
+        {Address: common.BytesToAddress([]byte("test"))},
+        {Address: common.BytesToAddress([]byte("address"))},
     }
     receipt2 := new(types.Receipt)
-    receipt2.Logs = vm.Logs{
-        &vm.Log{Address: common.BytesToAddress([]byte("test"))},
-        &vm.Log{Address: common.BytesToAddress([]byte("address1"))},
+    receipt2.Logs = []*types.Log{
+        {Address: common.BytesToAddress([]byte("test"))},
+        {Address: common.BytesToAddress([]byte("address1"))},
     }
     WriteMipmapBloom(db, 1, types.Receipts{receipt1})
@@ -528,14 +527,14 @@ func TestMipmapBloom(t *testing.T) {
     // reset
     db, _ = ethdb.NewMemDatabase()
     receipt := new(types.Receipt)
-    receipt.Logs = vm.Logs{
-        &vm.Log{Address: common.BytesToAddress([]byte("test"))},
+    receipt.Logs = []*types.Log{
+        {Address: common.BytesToAddress([]byte("test"))},
     }
     WriteMipmapBloom(db, 999, types.Receipts{receipt1})
     receipt = new(types.Receipt)
-    receipt.Logs = vm.Logs{
-        &vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
+    receipt.Logs = []*types.Log{
+        {Address: common.BytesToAddress([]byte("test 1"))},
     }
     WriteMipmapBloom(db, 1000, types.Receipts{receipt})
@@ -568,17 +567,12 @@ func TestMipmapChain(t *testing.T) {
     switch i {
     case 1:
         receipt := types.NewReceipt(nil, new(big.Int))
-        receipt.Logs = vm.Logs{
-            &vm.Log{
-                Address: addr,
-                Topics:  []common.Hash{hash1},
-            },
-        }
+        receipt.Logs = []*types.Log{{Address: addr, Topics: []common.Hash{hash1}}}
         gen.AddUncheckedReceipt(receipt)
         receipts = types.Receipts{receipt}
     case 1000:
         receipt := types.NewReceipt(nil, new(big.Int))
-        receipt.Logs = vm.Logs{&vm.Log{Address: addr2}}
+        receipt.Logs = []*types.Log{{Address: addr2}}
         gen.AddUncheckedReceipt(receipt)
         receipts = types.Receipts{receipt}

View File

@@ -21,7 +21,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
 )
 // TxPreEvent is posted when a transaction enters the transaction pool.
@@ -32,7 +31,7 @@ type TxPostEvent struct{ Tx *types.Transaction }
 // PendingLogsEvent is posted pre mining and notifies of pending logs.
 type PendingLogsEvent struct {
-    Logs vm.Logs
+    Logs []*types.Log
 }
 // PendingStateEvent is posted pre mining and notifies of pending state changes.
@@ -45,18 +44,18 @@ type NewMinedBlockEvent struct{ Block *types.Block }
 type RemovedTransactionEvent struct{ Txs types.Transactions }
 // RemovedLogEvent is posted when a reorg happens
-type RemovedLogsEvent struct{ Logs vm.Logs }
+type RemovedLogsEvent struct{ Logs []*types.Log }
 // ChainSplit is posted when a new head is detected
 type ChainSplitEvent struct {
     Block *types.Block
-    Logs  vm.Logs
+    Logs  []*types.Log
 }
 type ChainEvent struct {
     Block *types.Block
     Hash  common.Hash
-    Logs  vm.Logs
+    Logs  []*types.Log
 }
 type ChainSideEvent struct {
@@ -65,7 +64,7 @@ type ChainSideEvent struct {
 type PendingBlockEvent struct {
     Block *types.Block
-    Logs  vm.Logs
+    Logs  []*types.Log
 }
 type ChainUncleEvent struct {

View File

@@ -24,6 +24,7 @@ import (
     "sync"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/ethdb"
@@ -71,7 +72,7 @@ type StateDB struct {
     thash, bhash common.Hash
     txIndex      int
-    logs         map[common.Hash]vm.Logs
+    logs         map[common.Hash][]*types.Log
     logSize      uint
     // Journal of state modifications. This is the backbone of
@@ -97,7 +98,7 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
         stateObjects:      make(map[common.Address]*StateObject),
         stateObjectsDirty: make(map[common.Address]struct{}),
         refund:            new(big.Int),
-        logs:              make(map[common.Hash]vm.Logs),
+        logs:              make(map[common.Hash][]*types.Log),
     }, nil
 }
@@ -118,7 +119,7 @@ func (self *StateDB) New(root common.Hash) (*StateDB, error) {
         stateObjects:      make(map[common.Address]*StateObject),
         stateObjectsDirty: make(map[common.Address]struct{}),
         refund:            new(big.Int),
-        logs:              make(map[common.Hash]vm.Logs),
+        logs:              make(map[common.Hash][]*types.Log),
     }, nil
 }
@@ -138,7 +139,7 @@ func (self *StateDB) Reset(root common.Hash) error {
     self.thash = common.Hash{}
     self.bhash = common.Hash{}
     self.txIndex = 0
-    self.logs = make(map[common.Hash]vm.Logs)
+    self.logs = make(map[common.Hash][]*types.Log)
     self.logSize = 0
     self.clearJournalAndRefund()
@@ -175,7 +176,7 @@ func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
     self.txIndex = ti
 }
-func (self *StateDB) AddLog(log *vm.Log) {
+func (self *StateDB) AddLog(log *types.Log) {
     self.journal = append(self.journal, addLogChange{txhash: self.thash})
     log.TxHash = self.thash
@@ -186,12 +187,12 @@ func (self *StateDB) AddLog(log *vm.Log) {
     self.logSize++
 }
-func (self *StateDB) GetLogs(hash common.Hash) vm.Logs {
+func (self *StateDB) GetLogs(hash common.Hash) []*types.Log {
     return self.logs[hash]
 }
-func (self *StateDB) Logs() vm.Logs {
-    var logs vm.Logs
+func (self *StateDB) Logs() []*types.Log {
+    var logs []*types.Log
     for _, lgs := range self.logs {
         logs = append(logs, lgs...)
     }
@@ -474,7 +475,7 @@ func (self *StateDB) Copy() *StateDB {
         stateObjects:      make(map[common.Address]*StateObject, len(self.stateObjectsDirty)),
         stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
         refund:            new(big.Int).Set(self.refund),
-        logs:              make(map[common.Hash]vm.Logs, len(self.logs)),
+        logs:              make(map[common.Hash][]*types.Log, len(self.logs)),
         logSize:           self.logSize,
     }
     // Copy the dirty states and logs
@@ -483,7 +484,7 @@ func (self *StateDB) Copy() *StateDB {
         state.stateObjectsDirty[addr] = struct{}{}
     }
     for hash, logs := range self.logs {
-        state.logs[hash] = make(vm.Logs, len(logs))
+        state.logs[hash] = make([]*types.Log, len(logs))
         copy(state.logs[hash], logs)
     }
     return state
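With the relocation, code that records logs against the state database builds *types.Log values directly. A rough usage sketch against the StateDB methods shown above (state.New, StartRecord, AddLog, GetLogs); the in-memory database comes from ethdb.NewMemDatabase as in the tests elsewhere in this diff, and the transaction hash is made up:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
)

func main() {
    db, _ := ethdb.NewMemDatabase()
    statedb, _ := state.New(common.Hash{}, db)

    // Tag subsequent logs with a (made-up) transaction hash.
    txHash := common.BytesToHash([]byte{0x11})
    statedb.StartRecord(txHash, common.Hash{}, 0)

    // AddLog now takes a *types.Log instead of a *vm.Log; TxHash is
    // filled in from the recorded transaction hash.
    statedb.AddLog(&types.Log{
        Address: common.BytesToAddress([]byte("test")),
        Data:    []byte("hi"),
    })

    // GetLogs returns the []*types.Log recorded for that transaction.
    fmt.Println(len(statedb.GetLogs(txHash)))
}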

View File

@@ -29,7 +29,7 @@ import (
     "testing/quick"
     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core/vm"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/ethdb"
 )
@@ -221,7 +221,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
     fn: func(a testAction, s *StateDB) {
         data := make([]byte, 2)
         binary.BigEndian.PutUint16(data, uint16(a.args[0]))
-        s.AddLog(&vm.Log{Address: addr, Data: data})
+        s.AddLog(&types.Log{Address: addr, Data: data})
     },
     args: make([]int64, 1),
 },

View File

@@ -21,7 +21,6 @@ import (
     "math/big"
     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
 )
@@ -32,7 +31,7 @@ import (
 type StateSync trie.TrieSync
 // NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
+func NewStateSync(root common.Hash, database trie.DatabaseReader) *StateSync {
     var syncer *trie.TrieSync
     callback := func(leaf []byte, parent common.Hash) error {
@@ -62,8 +61,8 @@ func (s *StateSync) Missing(max int) []common.Hash {
 // Process injects a batch of retrieved trie nodes data, returning if something
 // was committed to the database and also the index of an entry if processing of
 // it failed.
-func (s *StateSync) Process(list []trie.SyncResult) (bool, int, error) {
-    return (*trie.TrieSync)(s).Process(list)
+func (s *StateSync) Process(list []trie.SyncResult, dbw trie.DatabaseWriter) (bool, int, error) {
+    return (*trie.TrieSync)(s).Process(list, dbw)
 }
 // Pending returns the number of state entries currently pending for download.
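Callers of StateSync.Process now pass the write target explicitly, which is what allows the downloader further below to buffer node writes in a database batch and commit them in one go. A hedged sketch of the new call shape, assuming (as the queue.go hunk implies) that an ethdb batch satisfies trie.DatabaseWriter:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/trie"
)

// processStateResults feeds retrieved trie nodes to the scheduler, collects
// the resulting writes in a batch and commits the batch explicitly.
func processStateResults(sched *state.StateSync, db ethdb.Database, results []trie.SyncResult) error {
    batch := db.NewBatch()
    if _, index, err := sched.Process(results, batch); err != nil {
        return fmt.Errorf("failed to process result #%d: %v", index, err)
    }
    return batch.Write()
}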

View File

@@ -138,7 +138,7 @@ func testIterativeStateSync(t *testing.T, batch int) {
         }
         results[i] = trie.SyncResult{Hash: hash, Data: data}
     }
-    if _, index, err := sched.Process(results); err != nil {
+    if _, index, err := sched.Process(results, dstDb); err != nil {
         t.Fatalf("failed to process result #%d: %v", index, err)
     }
     queue = append(queue[:0], sched.Missing(batch)...)
@@ -168,7 +168,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
         }
         results[i] = trie.SyncResult{Hash: hash, Data: data}
     }
-    if _, index, err := sched.Process(results); err != nil {
+    if _, index, err := sched.Process(results, dstDb); err != nil {
         t.Fatalf("failed to process result #%d: %v", index, err)
     }
     queue = append(queue[len(results):], sched.Missing(0)...)
@@ -206,7 +206,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
         results = append(results, trie.SyncResult{Hash: hash, Data: data})
     }
     // Feed the retrieved results back and queue new tasks
-    if _, index, err := sched.Process(results); err != nil {
+    if _, index, err := sched.Process(results, dstDb); err != nil {
         t.Fatalf("failed to process result #%d: %v", index, err)
     }
     queue = make(map[common.Hash]struct{})
@@ -249,7 +249,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
         }
     }
     // Feed the retrieved results back and queue new tasks
-    if _, index, err := sched.Process(results); err != nil {
+    if _, index, err := sched.Process(results, dstDb); err != nil {
         t.Fatalf("failed to process result #%d: %v", index, err)
     }
     for _, hash := range sched.Missing(0) {
@@ -283,7 +283,7 @@ func TestIncompleteStateSync(t *testing.T) {
         results[i] = trie.SyncResult{Hash: hash, Data: data}
     }
     // Process each of the state nodes
-    if _, index, err := sched.Process(results); err != nil {
+    if _, index, err := sched.Process(results, dstDb); err != nil {
         t.Fatalf("failed to process result #%d: %v", index, err)
     }
     for _, result := range results {

View File

@@ -57,13 +57,13 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain) *StateProcess
 // Process returns the receipts and logs accumulated during the process and
 // returns the amount of gas that was used in the process. If any of the
 // transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
+func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error) {
     var (
         receipts     types.Receipts
         totalUsedGas = big.NewInt(0)
         err          error
         header       = block.Header()
-        allLogs      vm.Logs
+        allLogs      []*types.Log
         gp           = new(GasPool).AddGas(block.GasLimit())
     )
     // Mutate the the block and state according to any hard-fork specs

View File

@@ -58,5 +58,5 @@ type HeaderValidator interface {
 // of gas used in the process and return an error if any of the internal rules
 // failed.
 type Processor interface {
-    Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error)
+    Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error)
 }

View File

@@ -22,7 +22,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
 )
@@ -95,7 +94,7 @@ func CreateBloom(receipts Receipts) Bloom {
     return BytesToBloom(bin.Bytes())
 }
-func LogsBloom(logs vm.Logs) *big.Int {
+func LogsBloom(logs []*Log) *big.Int {
     bin := new(big.Int)
     for _, log := range logs {
         data := make([]common.Hash, len(log.Topics))
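Bloom construction now works purely inside the types package. A small hedged sketch of feeding a slice of logs through the helpers above (LogsBloom returns a *big.Int which BytesToBloom packs into a Bloom, mirroring what CreateBloom does per receipt); the address and topic values are made up:

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// bloomFor builds a bloom filter for an arbitrary set of logs using the
// relocated helpers.
func bloomFor(logs []*types.Log) types.Bloom {
    return types.BytesToBloom(types.LogsBloom(logs).Bytes())
}

var exampleBloom = bloomFor([]*types.Log{
    {Address: common.BytesToAddress([]byte("test")), Topics: []common.Hash{common.HexToHash("0x01")}},
})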

View File

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package vm
+package types
 import (
     "encoding/json"
@@ -79,10 +79,6 @@ type jsonLog struct {
     Removed bool `json:"removed"`
 }
-func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
-    return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
-}
 // EncodeRLP implements rlp.Encoder.
 func (l *Log) EncodeRLP(w io.Writer) error {
     return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data})
@@ -150,8 +146,6 @@ func (l *Log) UnmarshalJSON(input []byte) error {
     return nil
 }
-type Logs []*Log
 // LogForStorage is a wrapper around a Log that flattens and parses the entire content of
 // a log including non-consensus fields.
 type LogForStorage Log

View File

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package vm
+package types
 import (
     "encoding/json"

View File

@@ -25,7 +25,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/rlp"
 )
@@ -40,7 +39,7 @@ type Receipt struct {
     PostState         []byte
     CumulativeGasUsed *big.Int
     Bloom             Bloom
-    Logs              vm.Logs
+    Logs              []*Log
     // Implementation fields (don't reorder!)
     TxHash common.Hash
@@ -52,7 +51,7 @@ type jsonReceipt struct {
     PostState         *common.Hash    `json:"root"`
     CumulativeGasUsed *hexutil.Big    `json:"cumulativeGasUsed"`
     Bloom             *Bloom          `json:"logsBloom"`
-    Logs              *vm.Logs        `json:"logs"`
+    Logs              []*Log          `json:"logs"`
     TxHash            *common.Hash    `json:"transactionHash"`
     ContractAddress   *common.Address `json:"contractAddress"`
     GasUsed           *hexutil.Big    `json:"gasUsed"`
@@ -76,7 +75,7 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
         PostState         []byte
         CumulativeGasUsed *big.Int
         Bloom             Bloom
-        Logs              vm.Logs
+        Logs              []*Log
     }
     if err := s.Decode(&receipt); err != nil {
         return err
@@ -93,7 +92,7 @@ func (r *Receipt) MarshalJSON() ([]byte, error) {
         PostState:         &root,
         CumulativeGasUsed: (*hexutil.Big)(r.CumulativeGasUsed),
         Bloom:             &r.Bloom,
-        Logs:              &r.Logs,
+        Logs:              r.Logs,
         TxHash:            &r.TxHash,
         ContractAddress:   &r.ContractAddress,
         GasUsed:           (*hexutil.Big)(r.GasUsed),
@@ -120,7 +119,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
         PostState:         (*dec.PostState)[:],
         CumulativeGasUsed: (*big.Int)(dec.CumulativeGasUsed),
         Bloom:             *dec.Bloom,
-        Logs:              *dec.Logs,
+        Logs:              dec.Logs,
         TxHash:            *dec.TxHash,
         GasUsed:           (*big.Int)(dec.GasUsed),
     }
@@ -142,9 +141,9 @@ type ReceiptForStorage Receipt
 // EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
 // into an RLP stream.
 func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
-    logs := make([]*vm.LogForStorage, len(r.Logs))
+    logs := make([]*LogForStorage, len(r.Logs))
     for i, log := range r.Logs {
-        logs[i] = (*vm.LogForStorage)(log)
+        logs[i] = (*LogForStorage)(log)
     }
     return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, logs, r.GasUsed})
 }
@@ -158,7 +157,7 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
         Bloom             Bloom
         TxHash            common.Hash
         ContractAddress   common.Address
-        Logs              []*vm.LogForStorage
+        Logs              []*LogForStorage
         GasUsed           *big.Int
     }
     if err := s.Decode(&receipt); err != nil {
@@ -166,9 +165,9 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
     }
     // Assign the consensus fields
     r.PostState, r.CumulativeGasUsed, r.Bloom = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom
-    r.Logs = make(vm.Logs, len(receipt.Logs))
+    r.Logs = make([]*Log, len(receipt.Logs))
     for i, log := range receipt.Logs {
-        r.Logs[i] = (*vm.Log)(log)
+        r.Logs[i] = (*Log)(log)
     }
     // Assign the implementation fields
     r.TxHash, r.ContractAddress, r.GasUsed = receipt.TxHash, receipt.ContractAddress, receipt.GasUsed
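Since ReceiptForStorage now wraps LogForStorage from the same package, a storage round-trip no longer touches core/vm at all. A hedged sketch using the standard rlp helpers (rlp.EncodeToBytes / rlp.DecodeBytes); the receipt contents mirror the test fixtures elsewhere in this diff:

package example

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

// roundTripReceipt encodes a receipt in its storage form and decodes it
// again, exercising the []*types.Log based EncodeRLP/DecodeRLP shown above.
func roundTripReceipt() (*types.Receipt, error) {
    receipt := &types.Receipt{
        PostState:         []byte{0x01},
        CumulativeGasUsed: big.NewInt(1),
        Logs: []*types.Log{
            {Address: common.BytesToAddress([]byte{0x11})},
        },
        TxHash:  common.BytesToHash([]byte{0x11, 0x11}),
        GasUsed: big.NewInt(1),
    }
    blob, err := rlp.EncodeToBytes((*types.ReceiptForStorage)(receipt))
    if err != nil {
        return nil, err
    }
    stored := new(types.ReceiptForStorage)
    if err := rlp.DecodeBytes(blob, stored); err != nil {
        return nil, err
    }
    return (*types.Receipt)(stored), nil
}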

View File

@@ -22,6 +22,7 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/math"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/params"
 )
@@ -605,8 +606,14 @@ func makeLog(size int) executionFunc {
         }
         d := memory.Get(mStart.Int64(), mSize.Int64())
-        log := NewLog(contract.Address(), topics, d, env.BlockNumber.Uint64())
-        env.StateDB.AddLog(log)
+        env.StateDB.AddLog(&types.Log{
+            Address: contract.Address(),
+            Topics:  topics,
+            Data:    d,
+            // This is a non-consensus field, but assigned here because
+            // core/state doesn't know the current block number.
+            BlockNumber: env.BlockNumber.Uint64(),
+        })
         return nil, nil
     }
 }

View File

@@ -20,6 +20,7 @@ import (
     "math/big"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/types"
 )
 // StateDB is an EVM database for full state querying.
@@ -58,7 +59,7 @@ type StateDB interface {
     RevertToSnapshot(int)
     Snapshot() int
-    AddLog(*Log)
+    AddLog(*types.Log)
 }
 // Account represents a contract or basic ethereum account.

View File

@@ -20,6 +20,7 @@ import (
     "math/big"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/types"
 )
 func NoopCanTransfer(db StateDB, from common.Address, balance *big.Int) bool {
@@ -65,4 +66,4 @@ func (NoopStateDB) Exist(common.Address) bool { return f
 func (NoopStateDB) Empty(common.Address) bool { return false }
 func (NoopStateDB) RevertToSnapshot(int) {}
 func (NoopStateDB) Snapshot() int { return 0 }
-func (NoopStateDB) AddLog(*Log) {}
+func (NoopStateDB) AddLog(*types.Log) {}

View File

@@ -31,7 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/internal/ethapi"
     "github.com/ethereum/go-ethereum/params"
-    rpc "github.com/ethereum/go-ethereum/rpc"
+    "github.com/ethereum/go-ethereum/rpc"
     "golang.org/x/net/context"
 )

View File

@@ -23,7 +23,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/params"
 )
@@ -38,12 +37,12 @@ func TestMipmapUpgrade(t *testing.T) {
     switch i {
     case 1:
         receipt := types.NewReceipt(nil, new(big.Int))
-        receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
+        receipt.Logs = []*types.Log{&types.Log{Address: addr}}
         gen.AddUncheckedReceipt(receipt)
         receipts = types.Receipts{receipt}
     case 2:
         receipt := types.NewReceipt(nil, new(big.Int))
-        receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
+        receipt.Logs = []*types.Log{&types.Log{Address: addr}}
         gen.AddUncheckedReceipt(receipt)
         receipts = types.Receipts{receipt}
     }

View File

@@ -1123,15 +1123,20 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(int, bo
         callback(i, progressed, errNoFetchesPending)
         return
     }
-    if prog, _, err := q.stateScheduler.Process([]trie.SyncResult{result}); err != nil {
-        // Processing a state result failed, bail out
+    batch := q.stateDatabase.NewBatch()
+    prog, _, err := q.stateScheduler.Process([]trie.SyncResult{result}, batch)
+    if err != nil {
         q.stateSchedLock.Unlock()
         callback(i, progressed, err)
         return
-    } else if prog {
-        progressed = true
+    }
+    if err = batch.Write(); err != nil {
+        q.stateSchedLock.Unlock()
+        callback(i, progressed, err)
     }
     // Item processing succeeded, release the lock (temporarily)
+    progressed = progressed || prog
     q.stateSchedLock.Unlock()
 }
 callback(len(results), progressed, nil)

View File

@@ -29,7 +29,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/rpc"
@@ -46,7 +45,7 @@ type filter struct {
     deadline *time.Timer // filter is inactiv when deadline triggers
     hashes   []common.Hash
     crit     FilterCriteria
-    logs     []*vm.Log
+    logs     []*types.Log
     s        *Subscription // associated subscription in event system
 }
@@ -242,7 +241,7 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
     var (
         rpcSub      = notifier.CreateSubscription()
-        matchedLogs = make(chan []*vm.Log)
+        matchedLogs = make(chan []*types.Log)
     )
     logsSub, err := api.events.SubscribeLogs(crit, matchedLogs)
@@ -293,14 +292,14 @@ type FilterCriteria struct {
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter
 func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
-    logs := make(chan []*vm.Log)
+    logs := make(chan []*types.Log)
     logsSub, err := api.events.SubscribeLogs(crit, logs)
     if err != nil {
         return rpc.ID(""), err
     }
     api.filtersMu.Lock()
-    api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*vm.Log, 0), s: logsSub}
+    api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*types.Log, 0), s: logsSub}
     api.filtersMu.Unlock()
     go func() {
@@ -327,7 +326,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
 // GetLogs returns logs matching the given argument that are stored within the state.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
-func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*vm.Log, error) {
+func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
     if crit.FromBlock == nil {
         crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
     }
@@ -366,7 +365,7 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
 // If the filter could not be found an empty array of logs is returned.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
-func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*vm.Log, error) {
+func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
     api.filtersMu.Lock()
     f, found := api.filters[id]
     api.filtersMu.Unlock()
@@ -441,9 +440,9 @@ func returnHashes(hashes []common.Hash) []common.Hash {
 // returnLogs is a helper that will return an empty log array in case the given logs array is nil,
 // otherwise the given logs array is returned.
-func returnLogs(logs []*vm.Log) []*vm.Log {
+func returnLogs(logs []*types.Log) []*types.Log {
     if logs == nil {
-        return []*vm.Log{}
+        return []*types.Log{}
     }
     return logs
 }

View File

@@ -25,7 +25,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/rpc"
@@ -91,7 +90,7 @@ func (f *Filter) SetTopics(topics [][]common.Hash) {
 // all matching entries from the first block that contains matches,
 // updating the start point of the filter accordingly. If no results are
 // found, a nil slice is returned.
-func (f *Filter) FindOnce(ctx context.Context) ([]*vm.Log, error) {
+func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
     head, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
     if head == nil {
         return nil, nil
@@ -122,7 +121,7 @@ func (f *Filter) FindOnce(ctx context.Context) ([]*vm.Log, error) {
 }
 // Run filters logs with the current parameters set
-func (f *Filter) Find(ctx context.Context) (logs []*vm.Log, err error) {
+func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) {
     for {
         newLogs, err := f.FindOnce(ctx)
         if len(newLogs) == 0 || err != nil {
@@ -132,7 +131,7 @@ func (f *Filter) Find(ctx context.Context) (logs []*vm.Log, err error) {
     }
 }
-func (f *Filter) mipFind(start, end uint64, depth int) (logs []*vm.Log, blockNumber uint64) {
+func (f *Filter) mipFind(start, end uint64, depth int) (logs []*types.Log, blockNumber uint64) {
     level := core.MIPMapLevels[depth]
     // normalise numerator so we can work in level specific batches and
     // work with the proper range checks
@@ -168,7 +167,7 @@ func (f *Filter) mipFind(start, end uint64, depth int) (logs []*vm.Log, blockNum
     return nil, end
 }
-func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*vm.Log, blockNumber uint64, err error) {
+func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.Log, blockNumber uint64, err error) {
     for i := start; i <= end; i++ {
         blockNumber := rpc.BlockNumber(i)
         header, err := f.backend.HeaderByNumber(ctx, blockNumber)
@@ -184,9 +183,9 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*vm.Log
         if err != nil {
             return nil, end, err
         }
-        var unfiltered []*vm.Log
+        var unfiltered []*types.Log
         for _, receipt := range receipts {
-            unfiltered = append(unfiltered, ([]*vm.Log)(receipt.Logs)...)
+            unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
         }
         logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
         if len(logs) > 0 {
@@ -209,8 +208,8 @@ func includes(addresses []common.Address, a common.Address) bool {
 }
 // filterLogs creates a slice of logs matching the given criteria.
-func filterLogs(logs []*vm.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*vm.Log {
-    var ret []*vm.Log
+func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
+    var ret []*types.Log
 Logs:
     for _, log := range logs {
         if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {

View File

@@ -27,7 +27,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/rpc"
     "golang.org/x/net/context"
@@ -64,7 +63,7 @@ type subscription struct {
     typ       Type
     created   time.Time
     logsCrit  FilterCriteria
-    logs      chan []*vm.Log
+    logs      chan []*types.Log
     hashes    chan common.Hash
     headers   chan *types.Header
     installed chan struct{} // closed when the filter is installed
@@ -151,7 +150,7 @@ func (es *EventSystem) subscribe(sub *subscription) *Subscription {
 // SubscribeLogs creates a subscription that will write all logs matching the
 // given criteria to the given logs channel. Default value for the from and to
 // block is "latest". If the fromBlock > toBlock an error is returned.
-func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*vm.Log) (*Subscription, error) {
+func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*types.Log) (*Subscription, error) {
     var from, to rpc.BlockNumber
     if crit.FromBlock == nil {
         from = rpc.LatestBlockNumber
@@ -189,7 +188,7 @@ func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*vm.Log) (
 // subscribeMinedPendingLogs creates a subscription that returned mined and
 // pending logs that match the given criteria.
-func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
     sub := &subscription{
         id:  rpc.NewID(),
         typ: MinedAndPendingLogsSubscription,
@@ -207,7 +206,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan
 // subscribeLogs creates a subscription that will write all logs matching the
 // given criteria to the given logs channel.
-func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
     sub := &subscription{
         id:  rpc.NewID(),
         typ: LogsSubscription,
@@ -225,7 +224,7 @@ func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*vm.Log) *
 // subscribePendingLogs creates a subscription that writes transaction hashes for
 // transactions that enter the transaction pool.
-func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
     sub := &subscription{
         id:  rpc.NewID(),
         typ: PendingLogsSubscription,
@@ -248,7 +247,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
         id:        rpc.NewID(),
         typ:       BlocksSubscription,
         created:   time.Now(),
-        logs:      make(chan []*vm.Log),
+        logs:      make(chan []*types.Log),
         hashes:    make(chan common.Hash),
         headers:   headers,
         installed: make(chan struct{}),
@@ -265,7 +264,7 @@ func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscr
         id:        rpc.NewID(),
         typ:       PendingTransactionsSubscription,
         created:   time.Now(),
-        logs:      make(chan []*vm.Log),
+        logs:      make(chan []*types.Log),
         hashes:    hashes,
         headers:   make(chan *types.Header),
         installed: make(chan struct{}),
@@ -284,7 +283,7 @@ func (es *EventSystem) broadcast(filters filterIndex, ev *event.Event) {
     }
     switch e := ev.Data.(type) {
-    case vm.Logs:
+    case []*types.Log:
         if len(e) > 0 {
             for _, f := range filters[LogsSubscription] {
                 if ev.Time.After(f.created) {
@@ -370,7 +369,7 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
 }
 // filter logs of a single header in light client mode
-func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*vm.Log {
+func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
     if bloomFilter(header.Bloom, addresses, topics) {
         // Get the logs of the block
         ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
@@ -378,7 +377,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
         if err != nil {
             return nil
         }
-        var unfiltered []*vm.Log
+        var unfiltered []*types.Log
         for _, receipt := range receipts {
             for _, log := range receipt.Logs {
                 logcopy := *log
@@ -396,7 +395,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
 func (es *EventSystem) eventLoop() {
     var (
         index = make(filterIndex)
-        sub   = es.mux.Subscribe(core.PendingLogsEvent{}, core.RemovedLogsEvent{}, vm.Logs{}, core.TxPreEvent{}, core.ChainEvent{})
+        sub   = es.mux.Subscribe(core.PendingLogsEvent{}, core.RemovedLogsEvent{}, []*types.Log{}, core.TxPreEvent{}, core.ChainEvent{})
     )
     for i := UnknownSubscription; i < LastIndexSubscription; i++ {

View File

@@ -27,7 +27,6 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/params"
@@ -263,30 +262,30 @@ func TestLogFilter(t *testing.T) {
         notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
         // posted twice, once as vm.Logs and once as core.PendingLogsEvent
-        allLogs = vm.Logs{
-            vm.NewLog(firstAddr, []common.Hash{}, []byte(""), 0),
-            vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 1),
-            vm.NewLog(secondAddr, []common.Hash{firstTopic}, []byte(""), 1),
-            vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 2),
-            vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 3),
+        allLogs = []*types.Log{
+            {Address: firstAddr},
+            {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
+            {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
+            {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
+            {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
         }
-        expectedCase7  = vm.Logs{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
-        expectedCase11 = vm.Logs{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
+        expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
+        expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
         testCases = []struct {
             crit     FilterCriteria
-            expected vm.Logs
+            expected []*types.Log
             id       rpc.ID
         }{
             // match all
             0: {FilterCriteria{}, allLogs, ""},
             // match none due to no matching addresses
-            1: {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, vm.Logs{}, ""},
+            1: {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, []*types.Log{}, ""},
             // match logs based on addresses, ignore topics
             2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
             // match none due to no matching topics (match with address)
-            3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, vm.Logs{}, ""},
+            3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, []*types.Log{}, ""},
             // match logs based on addresses and topics
             4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, allLogs[3:5], ""},
             // match logs based on multiple addresses and "or" topics
@@ -321,14 +320,14 @@ func TestLogFilter(t *testing.T) {
     }
     for i, tt := range testCases {
-        var fetched []*vm.Log
+        var fetched []*types.Log
         for { // fetch all expected logs
             results, err := api.GetFilterChanges(tt.id)
             if err != nil {
                 t.Fatalf("Unable to fetch logs: %v", err)
             }
-            fetched = append(fetched, results.([]*vm.Log)...)
+            fetched = append(fetched, results.([]*types.Log)...)
             if len(fetched) >= len(tt.expected) {
                 break
             }
@@ -373,21 +372,21 @@ func TestPendingLogsSubscription(t *testing.T) {
         notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
         allLogs = []core.PendingLogsEvent{
-            core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(firstAddr, []common.Hash{}, []byte(""), 0)}},
-            core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 1)}},
-            core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(secondAddr, []common.Hash{firstTopic}, []byte(""), 2)}},
-            core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 3)}},
-            core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 4)}},
-            core.PendingLogsEvent{Logs: vm.Logs{
-                vm.NewLog(thirdAddress, []common.Hash{firstTopic}, []byte(""), 5),
-                vm.NewLog(thirdAddress, []common.Hash{thirdTopic}, []byte(""), 5),
-                vm.NewLog(thirdAddress, []common.Hash{forthTopic}, []byte(""), 5),
+            {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
+            {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
+            {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
+            {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
+            {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
+            {Logs: []*types.Log{
+                {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
+                {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
+                {Address: thirdAddress, Topics: []common.Hash{forthTopic}, BlockNumber: 5},
vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 5), {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
}}, }},
} }
convertLogs = func(pl []core.PendingLogsEvent) vm.Logs { convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
var logs vm.Logs var logs []*types.Log
for _, l := range pl { for _, l := range pl {
logs = append(logs, l.Logs...) logs = append(logs, l.Logs...)
} }
@ -396,18 +395,18 @@ func TestPendingLogsSubscription(t *testing.T) {
testCases = []struct { testCases = []struct {
crit FilterCriteria crit FilterCriteria
expected vm.Logs expected []*types.Log
c chan []*vm.Log c chan []*types.Log
sub *Subscription sub *Subscription
}{ }{
// match all // match all
{FilterCriteria{}, convertLogs(allLogs), nil, nil}, {FilterCriteria{}, convertLogs(allLogs), nil, nil},
// match none due to no matching addresses // match none due to no matching addresses
{FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{[]common.Hash{}}}, vm.Logs{}, nil, nil}, {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{[]common.Hash{}}}, []*types.Log{}, nil, nil},
// match logs based on addresses, ignore topics // match logs based on addresses, ignore topics
{FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, {FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
// match none due to no matching topics (match with address) // match none due to no matching topics (match with address)
{FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, vm.Logs{}, nil, nil}, {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, []*types.Log{}, nil, nil},
// match logs based on addresses and topics // match logs based on addresses and topics
{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
// match logs based on multiple addresses and "or" topics // match logs based on multiple addresses and "or" topics
@ -415,7 +414,7 @@ func TestPendingLogsSubscription(t *testing.T) {
// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes // block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
{FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
// multiple pending logs, should match only 2 topics from the logs in block 5 // multiple pending logs, should match only 2 topics from the logs in block 5
{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, forthTopic}}}, vm.Logs{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, forthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
} }
) )
@ -423,7 +422,7 @@ func TestPendingLogsSubscription(t *testing.T) {
// on slow machines this could otherwise lead to missing events when the subscription is created after // on slow machines this could otherwise lead to missing events when the subscription is created after
// (some) events are posted. // (some) events are posted.
for i := range testCases { for i := range testCases {
testCases[i].c = make(chan []*vm.Log) testCases[i].c = make(chan []*types.Log)
testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
} }
@ -431,7 +430,7 @@ func TestPendingLogsSubscription(t *testing.T) {
i := n i := n
tt := test tt := test
go func() { go func() {
var fetched []*vm.Log var fetched []*types.Log
fetchLoop: fetchLoop:
for { for {
logs := <-tt.c logs := <-tt.c

View File

@ -27,17 +27,16 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
) )
func makeReceipt(addr common.Address) *types.Receipt { func makeReceipt(addr common.Address) *types.Receipt {
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.Logs = vm.Logs{ receipt.Logs = []*types.Log{
&vm.Log{Address: addr}, {Address: addr},
} }
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
return receipt return receipt
@ -146,8 +145,8 @@ func TestFilters(t *testing.T) {
switch i { switch i {
case 1: case 1:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.Logs = vm.Logs{ receipt.Logs = []*types.Log{
&vm.Log{ {
Address: addr, Address: addr,
Topics: []common.Hash{hash1}, Topics: []common.Hash{hash1},
}, },
@ -156,8 +155,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
case 2: case 2:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.Logs = vm.Logs{ receipt.Logs = []*types.Log{
&vm.Log{ {
Address: addr, Address: addr,
Topics: []common.Hash{hash2}, Topics: []common.Hash{hash2},
}, },
@ -166,8 +165,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
case 998: case 998:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.Logs = vm.Logs{ receipt.Logs = []*types.Log{
&vm.Log{ {
Address: addr, Address: addr,
Topics: []common.Hash{hash3}, Topics: []common.Hash{hash3},
}, },
@ -176,8 +175,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
case 999: case 999:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.Logs = vm.Logs{ receipt.Logs = []*types.Log{
&vm.Log{ {
Address: addr, Address: addr,
Topics: []common.Hash{hash4}, Topics: []common.Hash{hash4},
}, },

View File

@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -294,14 +293,14 @@ func (ec *Client) NonceAt(ctx context.Context, account common.Address, blockNumb
// Filters // Filters
// FilterLogs executes a filter query. // FilterLogs executes a filter query.
func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]vm.Log, error) { func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
var result []vm.Log var result []types.Log
err := ec.c.CallContext(ctx, &result, "eth_getLogs", toFilterArg(q)) err := ec.c.CallContext(ctx, &result, "eth_getLogs", toFilterArg(q))
return result, err return result, err
} }
// SubscribeFilterLogs subscribes to the results of a streaming filter query. // SubscribeFilterLogs subscribes to the results of a streaming filter query.
func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- vm.Log) (ethereum.Subscription, error) { func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
return ec.c.EthSubscribe(ctx, ch, "logs", toFilterArg(q)) return ec.c.EthSubscribe(ctx, ch, "logs", toFilterArg(q))
} }

View File

@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -156,8 +155,8 @@ type FilterQuery struct {
// Logs received through a streaming query subscription may have Removed set to true, // Logs received through a streaming query subscription may have Removed set to true,
// indicating that the log was reverted due to a chain reorganisation. // indicating that the log was reverted due to a chain reorganisation.
type LogFilterer interface { type LogFilterer interface {
FilterLogs(ctx context.Context, q FilterQuery) ([]vm.Log, error) FilterLogs(ctx context.Context, q FilterQuery) ([]types.Log, error)
SubscribeFilterLogs(ctx context.Context, q FilterQuery, ch chan<- vm.Log) (Subscription, error) SubscribeFilterLogs(ctx context.Context, q FilterQuery, ch chan<- types.Log) (Subscription, error)
} }
// TransactionSender wraps transaction sending. The SendTransaction method injects a // TransactionSender wraps transaction sending. The SendTransaction method injects a
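For streaming queries the LogFilterer interface above now delivers types.Log values, and the Removed flag mentioned in its doc comment is what signals a chain reorganisation. A rough subscriber sketch, assuming any implementation of the interface (for instance the ethclient shown earlier):

package logwatch

import (
	"fmt"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/core/types"
	"golang.org/x/net/context"
)

// watchLogs drains a streaming filter and separates reverted entries.
func watchLogs(ctx context.Context, lf ethereum.LogFilterer, q ethereum.FilterQuery) error {
	ch := make(chan types.Log, 16)
	sub, err := lf.SubscribeFilterLogs(ctx, q, ch)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case err := <-sub.Err():
			return err
		case l := <-ch:
			if l.Removed {
				fmt.Println("reverted by reorg:", l.TxHash.Hex())
				continue
			}
			fmt.Println("new log:", l.Address.Hex(), l.BlockNumber)
		}
	}
}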

View File

@ -963,7 +963,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
"logsBloom": receipt.Bloom, "logsBloom": receipt.Bloom,
} }
if receipt.Logs == nil { if receipt.Logs == nil {
fields["logs"] = []vm.Logs{} fields["logs"] = [][]*types.Log{}
} }
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) { if receipt.ContractAddress != (common.Address{}) {

View File

@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
rpc "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context" "golang.org/x/net/context"
) )

View File

@ -20,6 +20,7 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -42,7 +43,7 @@ func (s *VMState) Error() error {
return s.err return s.err
} }
func (s *VMState) AddLog(log *vm.Log) {} func (s *VMState) AddLog(log *types.Log) {}
// errHandler handles and stores any state error that happens during execution. // errHandler handles and stores any state error that happens during execution.
func (s *VMState) errHandler(err error) { func (s *VMState) errHandler(err error) {

View File

@ -327,7 +327,7 @@ func (self *worker) wait() {
} }
// broadcast before waiting for validation // broadcast before waiting for validation
go func(block *types.Block, logs vm.Logs, receipts []*types.Receipt) { go func(block *types.Block, logs []*types.Log, receipts []*types.Receipt) {
self.mux.Post(core.NewMinedBlockEvent{Block: block}) self.mux.Post(core.NewMinedBlockEvent{Block: block})
self.mux.Post(core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) self.mux.Post(core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
@ -537,7 +537,7 @@ func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, gasPrice *big.Int, bc *core.BlockChain) { func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, gasPrice *big.Int, bc *core.BlockChain) {
gp := new(core.GasPool).AddGas(env.header.GasLimit) gp := new(core.GasPool).AddGas(env.header.GasLimit)
var coalescedLogs vm.Logs var coalescedLogs []*types.Log
for { for {
// Retrieve the next transaction and abort if all done // Retrieve the next transaction and abort if all done
@ -597,12 +597,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
// logs by filling in the block hash when the block was mined by the local miner. This can // logs by filling in the block hash when the block was mined by the local miner. This can
// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
cpy := make(vm.Logs, len(coalescedLogs)) cpy := make([]*types.Log, len(coalescedLogs))
for i, l := range coalescedLogs { for i, l := range coalescedLogs {
cpy[i] = new(vm.Log) cpy[i] = new(types.Log)
*cpy[i] = *l *cpy[i] = *l
} }
go func(logs vm.Logs, tcount int) { go func(logs []*types.Log, tcount int) {
if len(logs) > 0 { if len(logs) > 0 {
mux.Post(core.PendingLogsEvent{Logs: logs}) mux.Post(core.PendingLogsEvent{Logs: logs})
} }
@ -613,7 +613,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
} }
} }
func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, gp *core.GasPool) (error, vm.Logs) { func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, gp *core.GasPool) (error, []*types.Log) {
snap := env.state.Snapshot() snap := env.state.Snapshot()
receipt, _, err := core.ApplyTransaction(env.config, bc, gp, env.state, env.header, tx, env.header.GasUsed, vm.Config{}) receipt, _, err := core.ApplyTransaction(env.config, bc, gp, env.state, env.header, tx, env.header.GasUsed, vm.Config{})

View File

@ -22,7 +22,6 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
) )
@ -191,7 +190,7 @@ func (ec *EthereumClient) FilterLogs(ctx *Context, query *FilterQuery) (logs *Lo
return nil, err return nil, err
} }
// Temp hack due to vm.Logs being []*vm.Log // Temp hack due to vm.Logs being []*vm.Log
res := make(vm.Logs, len(rawLogs)) res := make([]*types.Log, len(rawLogs))
for i, log := range rawLogs { for i, log := range rawLogs {
res[i] = &rawLogs[i] res[i] = &rawLogs[i]
} }
@ -208,7 +207,7 @@ type FilterLogsHandler interface {
// SubscribeFilterLogs subscribes to the results of a streaming filter query. // SubscribeFilterLogs subscribes to the results of a streaming filter query.
func (ec *EthereumClient) SubscribeFilterLogs(ctx *Context, query *FilterQuery, handler FilterLogsHandler, buffer int) (sub *Subscription, _ error) { func (ec *EthereumClient) SubscribeFilterLogs(ctx *Context, query *FilterQuery, handler FilterLogsHandler, buffer int) (sub *Subscription, _ error) {
// Subscribe to the event internally // Subscribe to the event internally
ch := make(chan vm.Log, buffer) ch := make(chan types.Log, buffer)
rawSub, err := ec.client.SubscribeFilterLogs(ctx.context, query.query, ch) rawSub, err := ec.client.SubscribeFilterLogs(ctx.context, query.query, ch)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -85,8 +85,8 @@ func NewChainConfig() *ChainConfig {
// by the foundation running the V5 discovery protocol. // by the foundation running the V5 discovery protocol.
func FoundationBootnodes() *Enodes { func FoundationBootnodes() *Enodes {
nodes := &Enodes{nodes: make([]*discv5.Node, len(params.DiscoveryV5Bootnodes))} nodes := &Enodes{nodes: make([]*discv5.Node, len(params.DiscoveryV5Bootnodes))}
for i, node := range params.DiscoveryV5Bootnodes { for i, url := range params.DiscoveryV5Bootnodes {
nodes.nodes[i] = node nodes.nodes[i] = discv5.MustParseNode(url)
} }
return nodes return nodes
} }

View File

@ -21,13 +21,13 @@ package geth
import ( import (
"errors" "errors"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/types"
) )
// Log represents a contract log event. These events are generated by the LOG // Log represents a contract log event. These events are generated by the LOG
// opcode and stored/indexed by the node. // opcode and stored/indexed by the node.
type Log struct { type Log struct {
log *vm.Log log *types.Log
} }
func (l *Log) GetAddress() *Address { return &Address{l.log.Address} } func (l *Log) GetAddress() *Address { return &Address{l.log.Address} }
@ -40,7 +40,7 @@ func (l *Log) GetBlockHash() *Hash { return &Hash{l.log.BlockHash} }
func (l *Log) GetIndex() int { return int(l.log.Index) } func (l *Log) GetIndex() int { return int(l.log.Index) }
// Logs represents a slice of VM logs. // Logs represents a slice of VM logs.
type Logs struct{ logs vm.Logs } type Logs struct{ logs []*types.Log }
// Size returns the number of logs in the slice. // Size returns the number of logs in the slice.
func (l *Logs) Size() int { func (l *Logs) Size() int {

View File

@ -16,37 +16,30 @@
package params package params
import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
)
// MainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on // MainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on
// the main Ethereum network. // the main Ethereum network.
var MainnetBootnodes = []*discover.Node{ var MainnetBootnodes = []string{
// ETH/DEV Go Bootnodes // ETH/DEV Go Bootnodes
discover.MustParseNode("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"), // IE "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", // IE
discover.MustParseNode("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"), // BR "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", // BR
discover.MustParseNode("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"), // SG "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", // SG
// ETH/DEV Cpp Bootnodes // ETH/DEV Cpp Bootnodes
discover.MustParseNode("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"), "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
} }
// TestnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the // TestnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the
// Morden test network. // Morden test network.
var TestnetBootnodes = []*discover.Node{ var TestnetBootnodes = []string{
// ETH/DEV Go Bootnodes // ETH/DEV Go Bootnodes
discover.MustParseNode("enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404"), "enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404",
discover.MustParseNode("enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303"), "enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303",
// ETH/DEV Cpp Bootnodes
} }
// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the // DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
// experimental RLPx v5 topic-discovery network. // experimental RLPx v5 topic-discovery network.
var DiscoveryV5Bootnodes = []*discv5.Node{ var DiscoveryV5Bootnodes = []string{
discv5.MustParseNode("enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305"), "enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305",
discv5.MustParseNode("enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308"), "enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308",
discv5.MustParseNode("enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309"), "enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309",
} }

View File

@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -146,7 +146,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error {
ret []byte ret []byte
// gas *big.Int // gas *big.Int
// err error // err error
logs vm.Logs logs []*types.Log
) )
ret, logs, _, _ = RunState(chainConfig, statedb, env, test.Transaction) ret, logs, _, _ = RunState(chainConfig, statedb, env, test.Transaction)
@ -203,7 +203,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error {
return nil return nil
} }
func RunState(chainConfig *params.ChainConfig, statedb *state.StateDB, env, tx map[string]string) ([]byte, vm.Logs, *big.Int, error) { func RunState(chainConfig *params.ChainConfig, statedb *state.StateDB, env, tx map[string]string) ([]byte, []*types.Log, *big.Int, error) {
environment, msg := NewEVMEnvironment(false, chainConfig, statedb, env, tx) environment, msg := NewEVMEnvironment(false, chainConfig, statedb, env, tx)
gaspool := new(core.GasPool).AddGas(common.Big(env["currentGasLimit"])) gaspool := new(core.GasPool).AddGas(common.Big(env["currentGasLimit"]))

View File

@ -47,7 +47,7 @@ func init() {
} }
} }
func checkLogs(tlog []Log, logs vm.Logs) error { func checkLogs(tlog []Log, logs []*types.Log) error {
if len(tlog) != len(logs) { if len(tlog) != len(logs) {
return fmt.Errorf("log length mismatch. Expected %d, got %d", len(tlog), len(logs)) return fmt.Errorf("log length mismatch. Expected %d, got %d", len(tlog), len(logs))
@ -70,7 +70,7 @@ func checkLogs(tlog []Log, logs vm.Logs) error {
} }
} }
} }
genBloom := common.LeftPadBytes(types.LogsBloom(vm.Logs{logs[i]}).Bytes(), 256) genBloom := common.LeftPadBytes(types.LogsBloom([]*types.Log{logs[i]}).Bytes(), 256)
if !bytes.Equal(genBloom, common.Hex2Bytes(log.BloomF)) { if !bytes.Equal(genBloom, common.Hex2Bytes(log.BloomF)) {
return fmt.Errorf("bloom mismatch") return fmt.Errorf("bloom mismatch")
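The bloom check above feeds a single-element []*types.Log into types.LogsBloom and left-pads the resulting integer to 256 bytes. A small standalone sketch of the same computation (address and topic values are arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	l := &types.Log{
		Address: common.HexToAddress("0x0000000000000000000000000000000000000001"),
		Topics:  []common.Hash{common.HexToHash("0x02")},
	}
	// LogsBloom folds the address and topics of every log into one 2048-bit filter.
	bloom := common.LeftPadBytes(types.LogsBloom([]*types.Log{l}).Bytes(), 256)
	fmt.Printf("%x\n", bloom)
}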

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
@ -164,7 +165,7 @@ func runVmTest(test VmTest) error {
ret []byte ret []byte
gas *big.Int gas *big.Int
err error err error
logs vm.Logs logs []*types.Log
) )
ret, logs, gas, err = RunVm(statedb, env, test.Exec) ret, logs, gas, err = RunVm(statedb, env, test.Exec)
@ -211,7 +212,7 @@ func runVmTest(test VmTest) error {
return nil return nil
} }
func RunVm(statedb *state.StateDB, env, exec map[string]string) ([]byte, vm.Logs, *big.Int, error) { func RunVm(statedb *state.StateDB, env, exec map[string]string) ([]byte, []*types.Log, *big.Int, error) {
chainConfig := &params.ChainConfig{ chainConfig := &params.ChainConfig{
HomesteadBlock: params.MainNetHomesteadBlock, HomesteadBlock: params.MainNetHomesteadBlock,
DAOForkBlock: params.MainNetDAOForkBlock, DAOForkBlock: params.MainNetDAOForkBlock,

View File

@ -21,7 +21,6 @@ import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"gopkg.in/karalabe/cookiejar.v2/collections/prque" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
) )
@ -58,13 +57,13 @@ type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error
// unknown trie hashes to retrieve, accepts node data associated with said hashes // unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done. // and reconstructs the trie step by step until all is done.
type TrieSync struct { type TrieSync struct {
database ethdb.Database // State database for storing all the assembled node data database DatabaseReader
requests map[common.Hash]*request // Pending requests pertaining to a key hash requests map[common.Hash]*request // Pending requests pertaining to a key hash
queue *prque.Prque // Priority queue with the pending requests queue *prque.Prque // Priority queue with the pending requests
} }
// NewTrieSync creates a new trie data download scheduler. // NewTrieSync creates a new trie data download scheduler.
func NewTrieSync(root common.Hash, database ethdb.Database, callback TrieSyncLeafCallback) *TrieSync { func NewTrieSync(root common.Hash, database DatabaseReader, callback TrieSyncLeafCallback) *TrieSync {
ts := &TrieSync{ ts := &TrieSync{
database: database, database: database,
requests: make(map[common.Hash]*request), requests: make(map[common.Hash]*request),
@ -145,7 +144,7 @@ func (s *TrieSync) Missing(max int) []common.Hash {
// Process injects a batch of retrieved trie nodes data, returning if something // Process injects a batch of retrieved trie nodes data, returning if something
// was committed to the database and also the index of an entry if processing of // was committed to the database and also the index of an entry if processing of
// it failed. // it failed.
func (s *TrieSync) Process(results []SyncResult) (bool, int, error) { func (s *TrieSync) Process(results []SyncResult, dbw DatabaseWriter) (bool, int, error) {
committed := false committed := false
for i, item := range results { for i, item := range results {
@ -157,7 +156,7 @@ func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
// If the item is a raw entry request, commit directly // If the item is a raw entry request, commit directly
if request.raw { if request.raw {
request.data = item.Data request.data = item.Data
s.commit(request, nil) s.commit(request, dbw)
committed = true committed = true
continue continue
} }
@ -174,7 +173,7 @@ func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
return committed, i, err return committed, i, err
} }
if len(requests) == 0 && request.deps == 0 { if len(requests) == 0 && request.deps == 0 {
s.commit(request, nil) s.commit(request, dbw)
committed = true committed = true
continue continue
} }
@ -266,16 +265,9 @@ func (s *TrieSync) children(req *request, object node) ([]*request, error) {
// commit finalizes a retrieval request and stores it into the database. If any // commit finalizes a retrieval request and stores it into the database. If any
// of the referencing parent requests complete due to this commit, they are also // of the referencing parent requests complete due to this commit, they are also
// committed themselves. // committed themselves.
func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) { func (s *TrieSync) commit(req *request, dbw DatabaseWriter) (err error) {
// Create a new batch if none was specified
if batch == nil {
batch = s.database.NewBatch()
defer func() {
err = batch.Write()
}()
}
// Write the node content to disk // Write the node content to disk
if err := batch.Put(req.hash[:], req.data); err != nil { if err := dbw.Put(req.hash[:], req.data); err != nil {
return err return err
} }
delete(s.requests, req.hash) delete(s.requests, req.hash)
@ -284,7 +276,7 @@ func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) {
for _, parent := range req.parents { for _, parent := range req.parents {
parent.deps-- parent.deps--
if parent.deps == 0 { if parent.deps == 0 {
if err := s.commit(parent, batch); err != nil { if err := s.commit(parent, dbw); err != nil {
return err return err
} }
} }
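Process now takes its DatabaseWriter as an argument instead of batching against the TrieSync's own database, so the caller decides where retrieved nodes are persisted. A rough caller sketch under those assumptions (syncTrie is a hypothetical helper; srcDb and dstDb stand in for whatever reader and writer the caller already holds):

package triesyncdemo

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// syncTrie pulls every node reachable from root out of srcDb and writes it to dstDb.
func syncTrie(root common.Hash, srcDb trie.DatabaseReader, dstDb ethdb.Database) error {
	sched := trie.NewTrieSync(root, dstDb, nil)
	for queue := sched.Missing(128); len(queue) > 0; queue = sched.Missing(128) {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				return err
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// The writer is passed explicitly; a dstDb.NewBatch() would work here just as well.
		if _, _, err := sched.Process(results, dstDb); err != nil {
			return err
		}
	}
	return nil
}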

View File

@ -122,7 +122,7 @@ func testIterativeTrieSync(t *testing.T, batch int) {
} }
results[i] = SyncResult{hash, data} results[i] = SyncResult{hash, data}
} }
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = append(queue[:0], sched.Missing(batch)...) queue = append(queue[:0], sched.Missing(batch)...)
@ -152,7 +152,7 @@ func TestIterativeDelayedTrieSync(t *testing.T) {
} }
results[i] = SyncResult{hash, data} results[i] = SyncResult{hash, data}
} }
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = append(queue[len(results):], sched.Missing(10000)...) queue = append(queue[len(results):], sched.Missing(10000)...)
@ -190,7 +190,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
results = append(results, SyncResult{hash, data}) results = append(results, SyncResult{hash, data})
} }
// Feed the retrieved results back and queue new tasks // Feed the retrieved results back and queue new tasks
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = make(map[common.Hash]struct{}) queue = make(map[common.Hash]struct{})
@ -231,7 +231,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
} }
} }
// Feed the retrieved results back and queue new tasks // Feed the retrieved results back and queue new tasks
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
for _, result := range results { for _, result := range results {
@ -272,7 +272,7 @@ func TestDuplicateAvoidanceTrieSync(t *testing.T) {
results[i] = SyncResult{hash, data} results[i] = SyncResult{hash, data}
} }
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = append(queue[:0], sched.Missing(0)...) queue = append(queue[:0], sched.Missing(0)...)
@ -304,7 +304,7 @@ func TestIncompleteTrieSync(t *testing.T) {
results[i] = SyncResult{hash, data} results[i] = SyncResult{hash, data}
} }
// Process each of the trie nodes // Process each of the trie nodes
if _, index, err := sched.Process(results); err != nil { if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
for _, result := range results { for _, result := range results {

View File

@ -60,8 +60,12 @@ func init() {
// Database must be implemented by backing stores for the trie. // Database must be implemented by backing stores for the trie.
type Database interface { type Database interface {
DatabaseReader
DatabaseWriter DatabaseWriter
// Get returns the value for key from the database. }
// DatabaseReader wraps the Get method of a backing store for the trie.
type DatabaseReader interface {
Get(key []byte) (value []byte, err error) Get(key []byte) (value []byte, err error)
} }
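With the Database interface split into DatabaseReader and DatabaseWriter, a backend only has to implement whichever half it actually needs, and a full Database is simply both. A toy in-memory implementation (the memoryDB name is purely illustrative) could look like:

package triedbdemo

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

// memoryDB is a map-backed store satisfying both DatabaseReader (Get)
// and DatabaseWriter (Put), and therefore the combined Database interface.
type memoryDB struct {
	data map[string][]byte
}

func newMemoryDB() *memoryDB {
	return &memoryDB{data: make(map[string][]byte)}
}

func (db *memoryDB) Put(key, value []byte) error {
	db.data[string(key)] = common.CopyBytes(value)
	return nil
}

func (db *memoryDB) Get(key []byte) (value []byte, err error) {
	if v, ok := db.data[string(key)]; ok {
		return common.CopyBytes(v), nil
	}
	return nil, errors.New("not found")
}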