WIP: add rpc pubsub interface for account diffs

Rob Mulholand 2019-09-11 16:03:30 -05:00
parent 39b0b1a1a6
commit 9b0042f925
19 changed files with 332 additions and 194 deletions

View File

@@ -517,6 +517,10 @@ func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscr
return fb.bc.SubscribeLogsEvent(ch)
}
+func (fb *filterBackend) SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription {
+return fb.bc.SubscribeStateDiffEvent(ch)
+}
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")

View File

@@ -138,15 +138,16 @@ type BlockChain struct {
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
hc *HeaderChain
rmLogsFeed event.Feed
chainFeed event.Feed
chainSideFeed event.Feed
chainHeadFeed event.Feed
logsFeed event.Feed
blockProcFeed event.Feed
+stateDiffsFeed event.Feed
scope event.SubscriptionScope
genesisBlock *types.Block
chainmu sync.RWMutex // blockchain insertion lock
@@ -1436,11 +1437,11 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
-n, events, logs, err := bc.insertChain(chain, true)
+n, events, logs, stateDiffs, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()
-bc.PostChainEvents(events, logs)
+bc.PostChainEvents(events, logs, stateDiffs)
return n, err
}
@@ -1452,10 +1453,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
-func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, map[common.Address]state.Account, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
-return 0, nil, nil, nil
+return 0, nil, nil, nil, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
@@ -1468,6 +1469,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
events = make([]interface{}, 0, len(chain))
lastCanon *types.Block
coalescedLogs []*types.Log
+stateDiffs map[common.Address]state.Account
)
// Start the parallel header verifier
headers := make([]*types.Header, len(chain))
@@ -1518,7 +1520,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && err == ErrKnownBlock {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
-return it.index, nil, nil, err
+return it.index, nil, nil, nil, err
}
lastCanon = block
@@ -1537,7 +1539,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
block, err = it.next()
}
@@ -1545,13 +1547,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
stats.ignored += it.remaining()
// If there are any still remaining, mark as ignored
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
// Some other error occurred, abort
case err != nil:
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
@@ -1563,7 +1565,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
-return it.index, events, coalescedLogs, ErrBlacklistedHash
+return it.index, events, coalescedLogs, stateDiffs, ErrBlacklistedHash
}
// If the block is known (in the middle of the chain), it's a special case for
// Clique blocks where they can share state among each other, so importing an
@@ -1580,7 +1582,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())
if err := bc.writeKnownBlock(block); err != nil {
-return it.index, nil, nil, err
+return it.index, nil, nil, stateDiffs, err
}
stats.processed++
@@ -1600,7 +1602,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
}
statedb, err := state.New(parent.Root, bc.stateCache)
if err != nil {
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
@@ -1621,11 +1623,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
}
// Process block using the parent state as reference point
substart := time.Now()
-receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+receipts, logs, processedStateDiffs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
@@ -1644,7 +1646,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
proctime := time.Since(start)
@@ -1659,7 +1661,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
status, err := bc.writeBlockWithState(block, receipts, statedb)
if err != nil {
atomic.StoreUint32(&followupInterrupt, 1)
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
atomic.StoreUint32(&followupInterrupt, 1)
@@ -1678,6 +1680,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
+stateDiffs = processedStateDiffs
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@@ -1708,13 +1711,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
block, err = it.next()
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
stats.queued++
}
@@ -1725,7 +1728,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
-return it.index, events, coalescedLogs, err
+return it.index, events, coalescedLogs, stateDiffs, err
}
// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -1734,7 +1737,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
-func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
+func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, map[common.Address]state.Account, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@@ -1770,7 +1773,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
-return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+return it.index, nil, nil, nil, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@@ -1781,7 +1784,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
-return it.index, nil, nil, err
+return it.index, nil, nil, nil, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -1798,7 +1801,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
-return it.index, nil, nil, err
+return it.index, nil, nil, nil, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -1813,7 +1816,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
-return it.index, nil, nil, errors.New("missing parent")
+return it.index, nil, nil, nil, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -1832,15 +1835,15 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
-if _, _, _, err := bc.insertChain(blocks, false); err != nil {
-return 0, nil, nil, err
+if _, _, _, _, err := bc.insertChain(blocks, false); err != nil {
+return 0, nil, nil, nil, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
-return 0, nil, nil, nil
+return 0, nil, nil, nil, nil
}
}
}
@@ -1848,7 +1851,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
-return 0, nil, nil, nil
+return 0, nil, nil, nil, nil
}
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
@@ -2003,11 +2006,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
-func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
+func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log, stateDiffs map[common.Address]state.Account) {
// post event logs for further processing
if logs != nil {
bc.logsFeed.Send(logs)
}
+// post state diffs for further processing
+if stateDiffs != nil {
+bc.stateDiffsFeed.Send(stateDiffs)
+}
for _, event := range events {
switch ev := event.(type) {
case ChainEvent:
@@ -2215,3 +2222,7 @@ func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
+func (bc *BlockChain) SubscribeStateDiffEvent(ch chan<- map[common.Address]state.Account) event.Subscription {
+return bc.scope.Track(bc.stateDiffsFeed.Subscribe(ch))
+}

View File

@@ -147,7 +147,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
if err != nil {
return err
}
-receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
+receipts, _, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
if err != nil {
blockchain.reportBlock(block, receipts, err)
return err

View File

@@ -685,6 +685,23 @@ func (s *StateDB) clearJournalAndRefund() {
s.refund = 0
}
+func (s *StateDB) GetDirtyAccounts() map[common.Address]Account {
+for addr := range s.journal.dirties {
+s.stateObjectsDirty[addr] = struct{}{}
+}
+results := make(map[common.Address]Account)
+for addr, stateObject := range s.stateObjects {
+_, isDirty := s.stateObjectsDirty[addr]
+if isDirty {
+results[addr] = stateObject.data
+}
+}
+return results
+}
// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
defer s.clearJournalAndRefund()
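
The new GetDirtyAccounts helper snapshots the post-execution data of every account that the journal marked dirty. A minimal sketch of its behaviour (illustrative, not part of this commit), using the in-memory database helpers already referenced elsewhere in this diff and the two-argument state.New of this geth version; the address is arbitrary:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
)

func main() {
	// Fresh, empty state backed by an in-memory database.
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))

	addr := common.HexToAddress("0x1111111111111111111111111111111111111111")
	statedb.AddBalance(addr, big.NewInt(1000)) // journals addr as dirty
	statedb.SetNonce(addr, 1)

	// Only the touched account should appear in the diff map.
	for a, acct := range statedb.GetDirtyAccounts() {
		fmt.Println(a.Hex(), "nonce:", acct.Nonce, "balance:", acct.Balance)
	}
}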

View File

@@ -53,7 +53,7 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
+func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, map[common.Address]state.Account, uint64, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
@@ -70,7 +70,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
statedb.Prepare(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg)
if err != nil {
-return nil, nil, 0, err
+return nil, nil, nil, 0, err
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
@@ -78,7 +78,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles())
-return receipts, allLogs, *usedGas, nil
+stateDiffs := statedb.GetDirtyAccounts()
+return receipts, allLogs, stateDiffs, *usedGas, nil
}
// ApplyTransaction attempts to apply a transaction to the given state database // ApplyTransaction attempts to apply a transaction to the given state database

View File

@@ -17,6 +17,7 @@
package core
import (
+"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -47,5 +48,5 @@ type Processor interface {
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
-Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
+Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, map[common.Address]state.Account, uint64, error)
}

View File

@@ -159,6 +159,10 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
return b.eth.BlockChain().SubscribeLogsEvent(ch)
}
+func (b *EthAPIBackend) SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription {
+return b.eth.BlockChain().SubscribeStateDiffEvent(ch)
+}
func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
return b.eth.txPool.AddLocal(signedTx)
}

View File

@@ -289,7 +289,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
traced += uint64(len(txs))
}
// Generate the next state snapshot fast without tracing
-_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
+_, _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
if err != nil {
failed = err
break
@@ -676,7 +676,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
if block = api.eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil {
return nil, fmt.Errorf("block #%d not found", block.NumberU64()+1)
}
-_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
+_, _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
if err != nil {
return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err)
}

View File

@@ -28,6 +28,7 @@ import (
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -233,6 +234,36 @@ func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, er
return rpcSub, nil
}
+// NewStateDiffs sends a notification each time an account changes in a block.
+func (api *PublicFilterAPI) NewStateDiffs(ctx context.Context) (*rpc.Subscription, error) {
+notifier, supported := rpc.NotifierFromContext(ctx)
+if !supported {
+return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
+}
+rpcSub := notifier.CreateSubscription()
+go func() {
+stateDiffs := make(chan map[common.Address]state.Account)
+stateDiffsSub := api.events.SubscribeStateDiffs(stateDiffs)
+for {
+select {
+case d := <-stateDiffs:
+notifier.Notify(rpcSub.ID, d)
+case <-rpcSub.Err():
+stateDiffsSub.Unsubscribe()
+return
+case <-notifier.Closed():
+stateDiffsSub.Unsubscribe()
+return
+}
+}
+}()
+return rpcSub, nil
+}
// Logs creates a subscription that fires for all new log that match the given filter criteria.
func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
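
On the wire, the NewStateDiffs handler above is reached through eth_subscribe. A hedged client-side sketch (not part of this commit), assuming the subscription name follows geth's usual derivation from the method name (NewStateDiffs -> "newStateDiffs") and that the node exposes a WebSocket endpoint on ws://127.0.0.1:8546:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("ws://127.0.0.1:8546") // assumed local WS endpoint
	if err != nil {
		log.Fatal(err)
	}

	diffs := make(chan map[common.Address]state.Account)
	// "newStateDiffs" is the assumed eth_subscribe parameter derived from NewStateDiffs.
	sub, err := client.EthSubscribe(context.Background(), diffs, "newStateDiffs")
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	for {
		select {
		case d := <-diffs:
			for addr, acct := range d {
				fmt.Printf("%s nonce=%d balance=%v\n", addr.Hex(), acct.Nonce, acct.Balance)
			}
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}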

View File

@@ -129,7 +129,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
-backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
}
var addr common.Address
addr[0] = byte(i)
@@ -174,7 +174,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
b.Log("Running filter benchmarks...")
start := time.Now()
mux := new(event.TypeMux)
-backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)

View File

@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
+"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -42,6 +43,7 @@ type Backend interface {
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription
BloomStatus() (uint64, uint64)
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)

View File

@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
+"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
@@ -55,6 +56,8 @@ const (
BlocksSubscription
// LastSubscription keeps track of the last index
LastIndexSubscription
+// StateDiffsSubscription queries for new account changes
+StateDiffsSubscription
)
const (
@@ -68,6 +71,8 @@ const (
logsChanSize = 10
// chainEvChanSize is the size of channel listening to ChainEvent.
chainEvChanSize = 10
+// stateDiffChanSize is the size of channel listening to StateDiffEvent.
+stateDiffChanSize = 10
)
var (
@@ -75,15 +80,16 @@ var (
)
type subscription struct {
id rpc.ID
typ Type
created time.Time
logsCrit ethereum.FilterQuery
logs chan []*types.Log
hashes chan []common.Hash
headers chan *types.Header
+stateDiffs chan map[common.Address]state.Account
installed chan struct{} // closed when the filter is installed
err chan error // closed when the filter is uninstalled
}
// EventSystem creates subscriptions, processes events and broadcasts them to the
@@ -100,14 +106,16 @@ type EventSystem struct {
rmLogsSub event.Subscription // Subscription for removed log event
chainSub event.Subscription // Subscription for new chain event
pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event
+stateDiffsSub event.Subscription // Subscription for new state diff events
// Channels
install chan *subscription // install filter for event notification
uninstall chan *subscription // remove filter for event notification
txsCh chan core.NewTxsEvent // Channel to receive new transactions event
logsCh chan []*types.Log // Channel to receive new log event
rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
chainCh chan core.ChainEvent // Channel to receive new chain event
+stateDiffCh chan map[common.Address]state.Account // Channel to receive new state diff events
}
// NewEventSystem creates a new manager that listens for event on the given mux,
@@ -118,15 +126,16 @@ type EventSystem struct {
// or by stopping the given mux.
func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
m := &EventSystem{
mux: mux,
backend: backend,
lightMode: lightMode,
install: make(chan *subscription),
uninstall: make(chan *subscription),
txsCh: make(chan core.NewTxsEvent, txChanSize),
logsCh: make(chan []*types.Log, logsChanSize),
rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
chainCh: make(chan core.ChainEvent, chainEvChanSize),
+stateDiffCh: make(chan map[common.Address]state.Account, stateDiffChanSize),
}
// Subscribe events
@@ -134,6 +143,7 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
+m.stateDiffsSub = m.backend.SubscribeStateDiffs(m.stateDiffCh)
// TODO(rjl493456442): use feed to subscribe pending log event
m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})
@@ -175,6 +185,7 @@ func (sub *Subscription) Unsubscribe() {
case <-sub.f.logs:
case <-sub.f.hashes:
case <-sub.f.headers:
+case <-sub.f.stateDiffs:
}
}
@@ -252,15 +263,16 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs
// given criteria to the given logs channel.
func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: LogsSubscription,
logsCrit: crit,
created: time.Now(),
logs: logs,
hashes: make(chan []common.Hash),
headers: make(chan *types.Header),
+stateDiffs: make(chan map[common.Address]state.Account),
installed: make(chan struct{}),
err: make(chan error),
}
return es.subscribe(sub)
}
@@ -269,15 +281,32 @@ func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*typ
// transactions that enter the transaction pool.
func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: PendingLogsSubscription,
logsCrit: crit,
created: time.Now(),
logs: logs,
hashes: make(chan []common.Hash),
headers: make(chan *types.Header),
+stateDiffs: make(chan map[common.Address]state.Account),
installed: make(chan struct{}),
err: make(chan error),
}
return es.subscribe(sub)
}
+// SubscribeStateDiffs creates a subscription that delivers the changed accounts of each imported block on the given channel.
+func (es *EventSystem) SubscribeStateDiffs(diffs chan map[common.Address]state.Account) *Subscription {
+sub := &subscription{
+id: rpc.NewID(),
+typ: StateDiffsSubscription,
+created: time.Now(),
+logs: make(chan []*types.Log),
+hashes: make(chan []common.Hash),
+headers: make(chan *types.Header),
+stateDiffs: diffs,
+installed: make(chan struct{}),
+err: make(chan error),
+}
+return es.subscribe(sub)
+}
@@ -286,14 +315,15 @@ func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan
// imported in the chain.
func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: BlocksSubscription,
created: time.Now(),
logs: make(chan []*types.Log),
hashes: make(chan []common.Hash),
headers: headers,
+stateDiffs: make(chan map[common.Address]state.Account),
installed: make(chan struct{}),
err: make(chan error),
}
return es.subscribe(sub)
}
@@ -302,14 +332,15 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
// transactions that enter the transaction pool.
func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: PendingTransactionsSubscription,
created: time.Now(),
logs: make(chan []*types.Log),
hashes: hashes,
headers: make(chan *types.Header),
+stateDiffs: make(chan map[common.Address]state.Account),
installed: make(chan struct{}),
err: make(chan error),
}
return es.subscribe(sub)
}
@@ -368,6 +399,10 @@ func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
}
})
}
+case map[common.Address]state.Account:
+for _, f := range filters[StateDiffsSubscription] {
+f.stateDiffs <- e
+}
}
}
@@ -453,6 +488,7 @@ func (es *EventSystem) eventLoop() {
es.logsSub.Unsubscribe()
es.rmLogsSub.Unsubscribe()
es.chainSub.Unsubscribe()
+es.stateDiffsSub.Unsubscribe()
}()
index := make(filterIndex)
@@ -471,6 +507,8 @@ func (es *EventSystem) eventLoop() {
es.broadcast(index, ev)
case ev := <-es.chainCh:
es.broadcast(index, ev)
+case ev := <-es.stateDiffCh:
+es.broadcast(index, ev)
case ev, active := <-es.pendingLogSub.Chan():
if !active { // system stopped
return
@@ -506,6 +544,8 @@ func (es *EventSystem) eventLoop() {
return
case <-es.chainSub.Err():
return
+case <-es.stateDiffsSub.Err():
+return
}
}
}

View File

@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
+"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -39,13 +40,14 @@ import (
)
type testBackend struct {
mux *event.TypeMux
db ethdb.Database
sections uint64
txFeed *event.Feed
rmLogsFeed *event.Feed
logsFeed *event.Feed
chainFeed *event.Feed
+stateDiffsFeed *event.Feed
}
func (b *testBackend) ChainDb() ethdb.Database {
@@ -120,6 +122,10 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
return b.chainFeed.Subscribe(ch)
}
+func (b *testBackend) SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription {
+return b.stateDiffsFeed.Subscribe(ch)
+}
func (b *testBackend) BloomStatus() (uint64, uint64) {
return params.BloomBitsBlocks, b.sections
}
@@ -160,17 +166,18 @@ func TestBlockSubscription(t *testing.T) {
t.Parallel()
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
genesis = new(core.Genesis).MustCommit(db)
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
chainEvents = []core.ChainEvent{}
)
for _, blk := range chain {
@@ -217,14 +224,15 @@ func TestPendingTxFilter(t *testing.T) {
t.Parallel()
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
transactions = []*types.Transaction{
types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
@@ -277,14 +285,15 @@ func TestPendingTxFilter(t *testing.T) {
// If not it must return an error.
func TestLogFilterCreation(t *testing.T) {
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
testCases = []struct {
crit FilterCriteria
@@ -326,14 +335,15 @@ func TestInvalidLogFilterCreation(t *testing.T) {
t.Parallel()
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
)
// different situations where log filter creation should fail.
@@ -353,15 +363,16 @@ func TestInvalidLogFilterCreation(t *testing.T) {
func TestInvalidGetLogsRequest(t *testing.T) {
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
)
// Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
@@ -383,14 +394,15 @@ func TestLogFilter(t *testing.T) {
t.Parallel()
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -502,14 +514,15 @@ func TestPendingLogsSubscription(t *testing.T) {
t.Parallel()
var (
mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")

View File

@@ -50,18 +50,19 @@ func BenchmarkFilters(b *testing.B) {
defer os.RemoveAll(dir)
var (
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
mux = new(event.TypeMux)
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = common.BytesToAddress([]byte("jeff"))
addr3 = common.BytesToAddress([]byte("ethereum"))
addr4 = common.BytesToAddress([]byte("random addresses please"))
)
defer db.Close()
@@ -109,15 +110,16 @@ func TestFilters(t *testing.T) {
defer os.RemoveAll(dir)
var (
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
mux = new(event.TypeMux)
txFeed = new(event.Feed)
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
-backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+stateDiffsFeed = new(event.Feed)
+backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed, stateDiffsFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
hash1 = common.BytesToHash([]byte("topic1"))
hash2 = common.BytesToHash([]byte("topic2"))

View File

@@ -78,6 +78,7 @@ type Backend interface {
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
+SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription
ChainConfig() *params.ChainConfig
CurrentBlock() *types.Block

View File

@@ -172,6 +172,10 @@ func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
return b.eth.blockchain.SubscribeRemovedLogsEvent(ch)
}
+func (b *LesApiBackend) SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription {
+return b.eth.blockchain.SubscribeStateDiffs(ch)
+}
func (b *LesApiBackend) Downloader() *downloader.Downloader {
return b.eth.Downloader()
}

View File

@@ -531,6 +531,12 @@ func (lc *LightChain) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
return lc.scope.Track(new(event.Feed).Subscribe(ch))
}
+// SubscribeStateDiffs implements the interface of filters.Backend.
+// LightChain does not send state diffs, so return an empty subscription.
+func (lc *LightChain) SubscribeStateDiffs(ch chan<- map[common.Address]state.Account) event.Subscription {
+return lc.scope.Track(new(event.Feed).Subscribe(ch))
+}
// DisableCheckFreq disables header validation. This is used for ultralight mode.
func (lc *LightChain) DisableCheckFreq() {
atomic.StoreInt32(&lc.disableCheckFreq, 1)

View File

@@ -608,7 +608,7 @@ func (w *worker) resultLoop() {
case core.SideStatTy:
events = append(events, core.ChainSideEvent{Block: block})
}
-w.chain.PostChainEvents(events, logs)
+w.chain.PostChainEvents(events, logs, task.state.GetDirtyAccounts())
// Insert the block into the set of pending ones to resultLoop for confirmations
w.unconfirmed.Insert(block.NumberU64(), block.Hash())

View File

@@ -134,7 +134,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
-b.chain.PostChainEvents(events, nil)
+b.chain.PostChainEvents(events, nil, nil)
}
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, blocks int) (*worker, *testWorkerBackend) {