Merge pull request #16720 from rjl493456442/PreTxsEvent

all: collate new transaction events together
Péter Szilágyi 2018-05-19 19:39:28 +03:00 committed by GitHub
commit 953b5ac015
19 changed files with 172 additions and 121 deletions

View File

@@ -454,7 +454,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
    return logs, nil
}

-func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
    return event.NewSubscription(func(quit <-chan struct{}) error {
        <-quit
        return nil

View File

@@ -21,8 +21,8 @@ import (
    "github.com/ethereum/go-ethereum/core/types"
)

-// TxPreEvent is posted when a transaction enters the transaction pool.
-type TxPreEvent struct{ Tx *types.Transaction }
+// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
+type NewTxsEvent struct{ Txs []*types.Transaction }

// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
@@ -35,9 +35,6 @@ type PendingStateEvent struct{}
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }

-// RemovedTransactionEvent is posted when a reorg happens
-type RemovedTransactionEvent struct{ Txs types.Transactions }
-
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
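
Note: as a rough illustration of the new event shape (not part of the diff), the sketch below subscribes a channel of core.NewTxsEvent to an event.Feed and receives a whole batch in one delivery. It assumes the go-ethereum common, core, core/types and event packages; the feed wiring and the dummy transaction are illustrative, not the pool's actual code.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
)

func main() {
    var feed event.Feed

    // Subscribers now receive batches, not individual transactions.
    ch := make(chan core.NewTxsEvent, 16)
    sub := feed.Subscribe(ch)
    defer sub.Unsubscribe()

    // One Send carries a whole slice of transactions.
    tx := types.NewTransaction(0, common.Address{}, nil, 21000, nil, nil)
    n := feed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})

    ev := <-ch
    fmt.Println("delivered to", n, "subscriber(s); batch size:", len(ev.Txs))
}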

View File

@@ -56,7 +56,7 @@ func newTxJournal(path string) *txJournal {
// load parses a transaction journal dump from disk, loading its contents into
// the specified pool.
-func (journal *txJournal) load(add func(*types.Transaction) error) error {
+func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
    // Skip the parsing if the journal file doens't exist at all
    if _, err := os.Stat(journal.path); os.IsNotExist(err) {
        return nil
@@ -76,7 +76,21 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error {
    stream := rlp.NewStream(input, 0)
    total, dropped := 0, 0

-    var failure error
+    // Create a method to load a limited batch of transactions and bump the
+    // appropriate progress counters. Then use this method to load all the
+    // journalled transactions in small-ish batches.
+    loadBatch := func(txs types.Transactions) {
+        for _, err := range add(txs) {
+            if err != nil {
+                log.Debug("Failed to add journaled transaction", "err", err)
+                dropped++
+            }
+        }
+    }
+    var (
+        failure error
+        batch   types.Transactions
+    )
    for {
        // Parse the next transaction and terminate on error
        tx := new(types.Transaction)
@@ -84,14 +98,17 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error {
            if err != io.EOF {
                failure = err
            }
+            if batch.Len() > 0 {
+                loadBatch(batch)
+            }
            break
        }
-        // Import the transaction and bump the appropriate progress counters
+        // New transaction parsed, queue up for later, import if threnshold is reached
        total++
-        if err = add(tx); err != nil {
-            log.Debug("Failed to add journaled transaction", "err", err)
-            dropped++
-            continue
+
+        if batch = append(batch, tx); batch.Len() > 1024 {
+            loadBatch(batch)
+            batch = batch[:0]
        }
    }
    log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)

View File

@@ -238,7 +238,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
    if !config.NoLocals && config.Journal != "" {
        pool.journal = newTxJournal(config.Journal)

-        if err := pool.journal.load(pool.AddLocal); err != nil {
+        if err := pool.journal.load(pool.AddLocals); err != nil {
            log.Warn("Failed to load transaction journal", "err", err)
        }
        if err := pool.journal.rotate(pool.local()); err != nil {
@@ -444,9 +444,9 @@ func (pool *TxPool) Stop() {
    log.Info("Transaction pool stopped")
}

-// SubscribeTxPreEvent registers a subscription of TxPreEvent and
+// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
-func (pool *TxPool) SubscribeTxPreEvent(ch chan<- TxPreEvent) event.Subscription {
+func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
    return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
@@ -653,7 +653,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
        log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

        // We've directly injected a replacement transaction, notify subsystems
-        go pool.txFeed.Send(TxPreEvent{tx})
+        go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})

        return old != nil, nil
    }
@@ -712,10 +712,11 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
    }
}

-// promoteTx adds a transaction to the pending (processable) list of transactions.
+// promoteTx adds a transaction to the pending (processable) list of transactions
+// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
-func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) {
+func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
    // Try to insert the transaction into the pending queue
    if pool.pending[addr] == nil {
        pool.pending[addr] = newTxList(true)
@@ -729,7 +730,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
        pool.priced.Removed()

        pendingDiscardCounter.Inc(1)
-        return
+        return false
    }
    // Otherwise discard any previous transaction and mark this
    if old != nil {
@@ -747,7 +748,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
    pool.beats[addr] = time.Now()
    pool.pendingState.SetNonce(addr, tx.Nonce()+1)

-    go pool.txFeed.Send(TxPreEvent{tx})
+    return true
}

// AddLocal enqueues a single transaction into the pool if it is valid, marking
@@ -907,6 +908,9 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) {
+    // Track the promoted transactions to broadcast them at once
+    var promoted []*types.Transaction
+
    // Gather all the accounts potentially needing updates
    if accounts == nil {
        accounts = make([]common.Address, 0, len(pool.queue))
@@ -939,8 +943,10 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
        // Gather all executable transactions and promote them
        for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
            hash := tx.Hash()
-            log.Trace("Promoting queued transaction", "hash", hash)
-            pool.promoteTx(addr, hash, tx)
+            if pool.promoteTx(addr, hash, tx) {
+                log.Trace("Promoting queued transaction", "hash", hash)
+                promoted = append(promoted, tx)
+            }
        }
        // Drop all transactions over the allowed limit
        if !pool.locals.contains(addr) {
@@ -957,6 +963,10 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
            delete(pool.queue, addr)
        }
    }
+    // Notify subsystem for new promoted transactions.
+    if len(promoted) > 0 {
+        pool.txFeed.Send(NewTxsEvent{promoted})
+    }
    // If the pending limit is overflown, start equalizing allowances
    pending := uint64(0)
    for _, list := range pool.pending {
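
Note: the core of this change is that promoteExecutables now collects every successfully promoted transaction and announces the whole batch with a single feed send, instead of promoteTx firing one event per transaction. A trivial, self-contained sketch of that collate-then-notify shape (all names illustrative):

package main

import "fmt"

type newItemsEvent struct{ Items []string }

func promoteAll(candidates []string, promote func(string) bool, notify func(newItemsEvent)) {
    var promoted []string
    for _, c := range candidates {
        if promote(c) {
            promoted = append(promoted, c)
        }
    }
    // One notification for the whole batch keeps subscriber channels from
    // being flooded with per-item events.
    if len(promoted) > 0 {
        notify(newItemsEvent{promoted})
    }
}

func main() {
    promoteAll([]string{"a", "b", "c"},
        func(s string) bool { return s != "b" }, // pretend "b" was dropped
        func(ev newItemsEvent) { fmt.Println("announced batch:", ev.Items) })
}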

View File

@@ -118,21 +118,27 @@ func validateTxPoolInternals(pool *TxPool) error {
// validateEvents checks that the correct number of transaction addition events
// were fired on the pool's event feed.
-func validateEvents(events chan TxPreEvent, count int) error {
-    for i := 0; i < count; i++ {
+func validateEvents(events chan NewTxsEvent, count int) error {
+    var received []*types.Transaction
+
+    for len(received) < count {
        select {
-        case <-events:
+        case ev := <-events:
+            received = append(received, ev.Txs...)
        case <-time.After(time.Second):
-            return fmt.Errorf("event #%d not fired", i)
+            return fmt.Errorf("event #%d not fired", received)
        }
    }
+    if len(received) > count {
+        return fmt.Errorf("more than %d events fired: %v", count, received[count:])
+    }
    select {
-    case tx := <-events:
-        return fmt.Errorf("more than %d events fired: %v", count, tx.Tx)
+    case ev := <-events:
+        return fmt.Errorf("more than %d events fired: %v", count, ev.Txs)
    case <-time.After(50 * time.Millisecond):
        // This branch should be "default", but it's a data race between goroutines,
-        // reading the event channel and pushng into it, so better wait a bit ensuring
+        // reading the event channel and pushing into it, so better wait a bit ensuring
        // really nothing gets injected.
    }
    return nil
@@ -669,7 +675,7 @@ func TestTransactionGapFilling(t *testing.T) {
    pool.currentState.AddBalance(account, big.NewInt(1000000))

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+    events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
@@ -920,7 +926,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
    pool.currentState.AddBalance(account, big.NewInt(1000000))

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+    events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
@@ -1140,7 +1146,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
    defer pool.Stop()

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, 32)
+    events := make(chan NewTxsEvent, 32)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
@@ -1327,7 +1333,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
    defer pool.Stop()

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, 32)
+    events := make(chan NewTxsEvent, 32)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
@@ -1433,7 +1439,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
    defer pool.Stop()

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, 32)
+    events := make(chan NewTxsEvent, 32)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
@@ -1495,7 +1501,7 @@ func TestTransactionReplacement(t *testing.T) {
    defer pool.Stop()

    // Keep track of transaction events to ensure all executables get announced
-    events := make(chan TxPreEvent, 32)
+    events := make(chan NewTxsEvent, 32)
    sub := pool.txFeed.Subscribe(events)
    defer sub.Unsubscribe()
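
Note: because events now carry batches, the test helper counts transactions across however many events arrive rather than counting events themselves. A self-contained sketch of that drain-until-count pattern, with plain int slices standing in for the event type:

package main

import (
    "fmt"
    "time"
)

// expectItems keeps receiving batches until the expected total of items has
// arrived, or fails once the timeout fires or too many items show up.
func expectItems(events <-chan []int, count int, timeout time.Duration) error {
    var received []int
    for len(received) < count {
        select {
        case batch := <-events:
            received = append(received, batch...)
        case <-time.After(timeout):
            return fmt.Errorf("only %d of %d items arrived", len(received), count)
        }
    }
    if len(received) > count {
        return fmt.Errorf("more than %d items arrived: %v", count, received[count:])
    }
    return nil
}

func main() {
    events := make(chan []int, 2)
    events <- []int{1, 2}
    events <- []int{3}
    fmt.Println(expectItems(events, 3, 100*time.Millisecond)) // <nil>
}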

View File

@@ -188,8 +188,8 @@ func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions,
    return b.eth.TxPool().Content()
}

-func (b *EthAPIBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
-    return b.eth.TxPool().SubscribeTxPreEvent(ch)
+func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+    return b.eth.TxPool().SubscribeNewTxsEvent(ch)
}

func (b *EthAPIBackend) Downloader() *downloader.Downloader {

View File

@@ -104,8 +104,8 @@ func (api *PublicFilterAPI) timeoutLoop() {
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
    var (
-        pendingTxs   = make(chan common.Hash)
-        pendingTxSub = api.events.SubscribePendingTxEvents(pendingTxs)
+        pendingTxs   = make(chan []common.Hash)
+        pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
    )

    api.filtersMu.Lock()
@@ -118,7 +118,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
            case ph := <-pendingTxs:
                api.filtersMu.Lock()
                if f, found := api.filters[pendingTxSub.ID]; found {
-                    f.hashes = append(f.hashes, ph)
+                    f.hashes = append(f.hashes, ph...)
                }
                api.filtersMu.Unlock()
            case <-pendingTxSub.Err():
@@ -144,13 +144,17 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
    rpcSub := notifier.CreateSubscription()

    go func() {
-        txHashes := make(chan common.Hash)
-        pendingTxSub := api.events.SubscribePendingTxEvents(txHashes)
+        txHashes := make(chan []common.Hash, 128)
+        pendingTxSub := api.events.SubscribePendingTxs(txHashes)

        for {
            select {
-            case h := <-txHashes:
-                notifier.Notify(rpcSub.ID, h)
+            case hashes := <-txHashes:
+                // To keep the original behaviour, send a single tx hash in one notification.
+                // TODO(rjl493456442) Send a batch of tx hashes in one notification
+                for _, h := range hashes {
+                    notifier.Notify(rpcSub.ID, h)
+                }
            case <-rpcSub.Err():
                pendingTxSub.Unsubscribe()
                return

View File

@@ -36,7 +36,7 @@ type Backend interface {
    GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
    GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)

-    SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
+    SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
    SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
    SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
    SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription

View File

@@ -59,7 +59,7 @@ const (

const (
-    // txChanSize is the size of channel listening to TxPreEvent.
+    // txChanSize is the size of channel listening to NewTxsEvent.
    // The number is referenced from the size of tx pool.
    txChanSize = 4096
    // rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
@@ -80,7 +80,7 @@ type subscription struct {
    created   time.Time
    logsCrit  ethereum.FilterQuery
    logs      chan []*types.Log
-    hashes    chan common.Hash
+    hashes    chan []common.Hash
    headers   chan *types.Header
    installed chan struct{} // closed when the filter is installed
    err       chan error    // closed when the filter is uninstalled
@@ -95,7 +95,7 @@ type EventSystem struct {
    lastHead *types.Header

    // Subscriptions
-    txSub         event.Subscription // Subscription for new transaction event
+    txsSub        event.Subscription // Subscription for new transaction event
    logsSub       event.Subscription // Subscription for new log event
    rmLogsSub     event.Subscription // Subscription for removed log event
    chainSub      event.Subscription // Subscription for new chain event
@@ -104,7 +104,7 @@ type EventSystem struct {
    // Channels
    install   chan *subscription         // install filter for event notification
    uninstall chan *subscription         // remove filter for event notification
-    txCh      chan core.TxPreEvent       // Channel to receive new transaction event
+    txsCh     chan core.NewTxsEvent      // Channel to receive new transactions event
    logsCh    chan []*types.Log          // Channel to receive new log event
    rmLogsCh  chan core.RemovedLogsEvent // Channel to receive removed log event
    chainCh   chan core.ChainEvent       // Channel to receive new chain event
@@ -123,14 +123,14 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
        lightMode: lightMode,
        install:   make(chan *subscription),
        uninstall: make(chan *subscription),
-        txCh:      make(chan core.TxPreEvent, txChanSize),
+        txsCh:     make(chan core.NewTxsEvent, txChanSize),
        logsCh:    make(chan []*types.Log, logsChanSize),
        rmLogsCh:  make(chan core.RemovedLogsEvent, rmLogsChanSize),
        chainCh:   make(chan core.ChainEvent, chainEvChanSize),
    }

    // Subscribe events
-    m.txSub = m.backend.SubscribeTxPreEvent(m.txCh)
+    m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)
    m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
    m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
    m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
@@ -138,7 +138,7 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
    m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})

    // Make sure none of the subscriptions are empty
-    if m.txSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
+    if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
        m.pendingLogSub.Closed() {
        log.Crit("Subscribe for event system failed")
    }
@@ -240,7 +240,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs
        logsCrit:  crit,
        created:   time.Now(),
        logs:      logs,
-        hashes:    make(chan common.Hash),
+        hashes:    make(chan []common.Hash),
        headers:   make(chan *types.Header),
        installed: make(chan struct{}),
        err:       make(chan error),
@@ -257,7 +257,7 @@ func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*typ
        logsCrit:  crit,
        created:   time.Now(),
        logs:      logs,
-        hashes:    make(chan common.Hash),
+        hashes:    make(chan []common.Hash),
        headers:   make(chan *types.Header),
        installed: make(chan struct{}),
        err:       make(chan error),
@@ -274,7 +274,7 @@ func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan
        logsCrit:  crit,
        created:   time.Now(),
        logs:      logs,
-        hashes:    make(chan common.Hash),
+        hashes:    make(chan []common.Hash),
        headers:   make(chan *types.Header),
        installed: make(chan struct{}),
        err:       make(chan error),
@@ -290,7 +290,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
        typ:       BlocksSubscription,
        created:   time.Now(),
        logs:      make(chan []*types.Log),
-        hashes:    make(chan common.Hash),
+        hashes:    make(chan []common.Hash),
        headers:   headers,
        installed: make(chan struct{}),
        err:       make(chan error),
@@ -298,9 +298,9 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
    return es.subscribe(sub)
}

-// SubscribePendingTxEvents creates a subscription that writes transaction hashes for
+// SubscribePendingTxs creates a subscription that writes transaction hashes for
// transactions that enter the transaction pool.
-func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscription {
+func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
    sub := &subscription{
        id:        rpc.NewID(),
        typ:       PendingTransactionsSubscription,
@@ -348,9 +348,13 @@ func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
                }
            }
        }
-    case core.TxPreEvent:
+    case core.NewTxsEvent:
+        hashes := make([]common.Hash, 0, len(e.Txs))
+        for _, tx := range e.Txs {
+            hashes = append(hashes, tx.Hash())
+        }
        for _, f := range filters[PendingTransactionsSubscription] {
-            f.hashes <- e.Tx.Hash()
+            f.hashes <- hashes
        }
    case core.ChainEvent:
        for _, f := range filters[BlocksSubscription] {
@@ -446,7 +450,7 @@ func (es *EventSystem) eventLoop() {
    // Ensure all subscriptions get cleaned up
    defer func() {
        es.pendingLogSub.Unsubscribe()
-        es.txSub.Unsubscribe()
+        es.txsSub.Unsubscribe()
        es.logsSub.Unsubscribe()
        es.rmLogsSub.Unsubscribe()
        es.chainSub.Unsubscribe()
@@ -460,7 +464,7 @@ func (es *EventSystem) eventLoop() {
    for {
        select {
        // Handle subscribed events
-        case ev := <-es.txCh:
+        case ev := <-es.txsCh:
            es.broadcast(index, ev)
        case ev := <-es.logsCh:
            es.broadcast(index, ev)
@@ -495,7 +499,7 @@ func (es *EventSystem) eventLoop() {
            close(f.err)

        // System stopped
-        case <-es.txSub.Err():
+        case <-es.txsSub.Err():
            return
        case <-es.logsSub.Err():
            return
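
Note: the broadcast path now converts one NewTxsEvent into a slice of hashes and delivers that whole slice to every pending-transaction filter in a single channel send. A rough sketch of that conversion and fan-out, assuming the go-ethereum common and core/types packages; the subscriber channels are illustrative stand-ins for the filter system's internals.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// fanOutHashes turns a transaction batch into hashes once, then hands the
// whole batch to every subscriber with a single send each.
func fanOutHashes(txs []*types.Transaction, subscribers []chan []common.Hash) {
    hashes := make([]common.Hash, 0, len(txs))
    for _, tx := range txs {
        hashes = append(hashes, tx.Hash())
    }
    for _, ch := range subscribers {
        ch <- hashes
    }
}

func main() {
    tx := types.NewTransaction(0, common.Address{}, nil, 21000, nil, nil)
    sub := make(chan []common.Hash, 1)
    fanOutHashes([]*types.Transaction{tx}, []chan []common.Hash{sub})
    fmt.Println("received", len(<-sub), "hash(es) in one delivery")
}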

View File

@@ -96,7 +96,7 @@ func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types
    return logs, nil
}

-func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
    return b.txFeed.Subscribe(ch)
}
@@ -232,10 +232,7 @@ func TestPendingTxFilter(t *testing.T) {
    fid0 := api.NewPendingTransactionFilter()

    time.Sleep(1 * time.Second)
-    for _, tx := range transactions {
-        ev := core.TxPreEvent{Tx: tx}
-        txFeed.Send(ev)
-    }
+    txFeed.Send(core.NewTxsEvent{Txs: transactions})

    timeout := time.Now().Add(1 * time.Second)
    for {

View File

@@ -46,7 +46,7 @@ const (
    softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

-    // txChanSize is the size of channel listening to TxPreEvent.
+    // txChanSize is the size of channel listening to NewTxsEvent.
    // The number is referenced from the size of tx pool.
    txChanSize = 4096
)
@@ -81,8 +81,8 @@ type ProtocolManager struct {
    SubProtocols []p2p.Protocol

    eventMux      *event.TypeMux
-    txCh          chan core.TxPreEvent
-    txSub         event.Subscription
+    txsCh         chan core.NewTxsEvent
+    txsSub        event.Subscription
    minedBlockSub *event.TypeMuxSubscription

    // channels for fetcher, syncer, txsyncLoop
@@ -204,8 +204,8 @@ func (pm *ProtocolManager) Start(maxPeers int) {
    pm.maxPeers = maxPeers

    // broadcast transactions
-    pm.txCh = make(chan core.TxPreEvent, txChanSize)
-    pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
+    pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
+    pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
    go pm.txBroadcastLoop()

    // broadcast mined blocks
@@ -220,7 +220,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
func (pm *ProtocolManager) Stop() {
    log.Info("Stopping Ethereum protocol")

-    pm.txSub.Unsubscribe()         // quits txBroadcastLoop
+    pm.txsSub.Unsubscribe()        // quits txBroadcastLoop
    pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop

    // Quit the sync loop.
@@ -712,16 +712,23 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
    }
}

-// BroadcastTx will propagate a transaction to all peers which are not known to
+// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
// already have the given transaction.
-func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
-    // Broadcast transaction to a batch of peers not knowing about it
-    peers := pm.peers.PeersWithoutTx(hash)
-    //FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
-    for _, peer := range peers {
-        peer.SendTransactions(types.Transactions{tx})
+func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
+    var txset = make(map[*peer]types.Transactions)
+
+    // Broadcast transactions to a batch of peers not knowing about it
+    for _, tx := range txs {
+        peers := pm.peers.PeersWithoutTx(tx.Hash())
+        for _, peer := range peers {
+            txset[peer] = append(txset[peer], tx)
+        }
+        log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
+    }
+    // FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
+    for peer, txs := range txset {
+        peer.SendTransactions(txs)
    }
-    log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
}

// Mined broadcast loop
@@ -739,11 +746,11 @@ func (pm *ProtocolManager) minedBroadcastLoop() {
func (pm *ProtocolManager) txBroadcastLoop() {
    for {
        select {
-        case event := <-pm.txCh:
-            pm.BroadcastTx(event.Tx.Hash(), event.Tx)
+        case event := <-pm.txsCh:
+            pm.BroadcastTxs(event.Txs)

        // Err() channel will be closed when unsubscribing.
-        case <-pm.txSub.Err():
+        case <-pm.txsSub.Err():
            return
        }
    }
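
Note: instead of one network write per (transaction, peer) pair, BroadcastTxs first partitions the incoming batch by peer and then sends each peer a single message with all of its missing transactions. A self-contained sketch of that partitioning, with string transactions and a peerID type standing in for the real peer and transaction types:

package main

import "fmt"

type peerID string

// broadcastBatch groups transactions per peer, then performs one batched send
// per peer rather than one send per transaction.
func broadcastBatch(txs []string, peersWithout func(tx string) []peerID, send func(peerID, []string)) {
    txset := make(map[peerID][]string)
    for _, tx := range txs {
        for _, p := range peersWithout(tx) {
            txset[p] = append(txset[p], tx)
        }
    }
    for p, batch := range txset {
        send(p, batch) // one message carrying everything this peer is missing
    }
}

func main() {
    peersWithout := func(tx string) []peerID {
        if tx == "tx1" {
            return []peerID{"a", "b"}
        }
        return []peerID{"b"}
    }
    broadcastBatch([]string{"tx1", "tx2"}, peersWithout, func(p peerID, batch []string) {
        fmt.Println("send to", p, batch)
    })
}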

View File

@@ -124,7 +124,7 @@ func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) {
    return batches, nil
}

-func (p *testTxPool) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
    return p.txFeed.Subscribe(ch)
}

View File

@@ -103,9 +103,9 @@ type txPool interface {
    // The slice should be modifiable by the caller.
    Pending() (map[common.Address]types.Transactions, error)

-    // SubscribeTxPreEvent should return an event subscription of
-    // TxPreEvent and send events to the given channel.
-    SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
+    // SubscribeNewTxsEvent should return an event subscription of
+    // NewTxsEvent and send events to the given channel.
+    SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}

// statusData is the network packet for the status message.

View File

@@ -116,7 +116,7 @@ func testRecvTransactions(t *testing.T, protocol int) {
            t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash())
        }
    case <-time.After(2 * time.Second):
-        t.Errorf("no TxPreEvent received within 2 seconds")
+        t.Errorf("no NewTxsEvent received within 2 seconds")
    }
}

View File

@@ -49,7 +49,7 @@ const (
    // history request.
    historyUpdateRange = 50

-    // txChanSize is the size of channel listening to TxPreEvent.
+    // txChanSize is the size of channel listening to NewTxsEvent.
    // The number is referenced from the size of tx pool.
    txChanSize = 4096
    // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
@@ -57,9 +57,9 @@ const (
)

type txPool interface {
-    // SubscribeTxPreEvent should return an event subscription of
-    // TxPreEvent and send events to the given channel.
-    SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
+    // SubscribeNewTxsEvent should return an event subscription of
+    // NewTxsEvent and send events to the given channel.
+    SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}

type blockChain interface {
@@ -150,8 +150,8 @@ func (s *Service) loop() {
    headSub := blockchain.SubscribeChainHeadEvent(chainHeadCh)
    defer headSub.Unsubscribe()

-    txEventCh := make(chan core.TxPreEvent, txChanSize)
-    txSub := txpool.SubscribeTxPreEvent(txEventCh)
+    txEventCh := make(chan core.NewTxsEvent, txChanSize)
+    txSub := txpool.SubscribeNewTxsEvent(txEventCh)
    defer txSub.Unsubscribe()

    // Start a goroutine that exhausts the subsciptions to avoid events piling up

View File

@@ -65,7 +65,7 @@ type Backend interface {
    GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error)
    Stats() (pending int, queued int)
    TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions)
-    SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
+    SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription

    ChainConfig() *params.ChainConfig
    CurrentBlock() *types.Block

View File

@@ -136,8 +136,8 @@ func (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions,
    return b.eth.txPool.Content()
}

-func (b *LesApiBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
-    return b.eth.txPool.SubscribeTxPreEvent(ch)
+func (b *LesApiBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+    return b.eth.txPool.SubscribeNewTxsEvent(ch)
}

func (b *LesApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {

View File

@@ -321,9 +321,9 @@ func (pool *TxPool) Stop() {
    log.Info("Transaction pool stopped")
}

-// SubscribeTxPreEvent registers a subscription of core.TxPreEvent and
+// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
// starts sending event to the given channel.
-func (pool *TxPool) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
    return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
@@ -412,7 +412,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
        // Notify the subscribers. This event is posted in a goroutine
        // because it's possible that somewhere during the post "Remove transaction"
        // gets called which will then wait for the global tx pool lock and deadlock.
-        go self.txFeed.Send(core.TxPreEvent{Tx: tx})
+        go self.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
    }

    // Print a log message if low enough level is set
// Print a log message if low enough level is set // Print a log message if low enough level is set

View File

@@ -42,7 +42,7 @@ const (
    resultQueueSize  = 10
    miningLogAtDepth = 5

-    // txChanSize is the size of channel listening to TxPreEvent.
+    // txChanSize is the size of channel listening to NewTxsEvent.
    // The number is referenced from the size of tx pool.
    txChanSize = 4096
    // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
@@ -71,6 +71,7 @@ type Work struct {
    family    *set.Set       // family set (used for checking uncle invalidity)
    uncles    *set.Set       // uncle set
    tcount    int            // tx count in cycle
+    gasPool   *core.GasPool  // available gas used to pack transactions

    Block *types.Block // the new block
@@ -95,8 +96,8 @@ type worker struct {
    // update loop
    mux          *event.TypeMux
-    txCh         chan core.TxPreEvent
-    txSub        event.Subscription
+    txsCh        chan core.NewTxsEvent
+    txsSub       event.Subscription
    chainHeadCh  chan core.ChainHeadEvent
    chainHeadSub event.Subscription
    chainSideCh  chan core.ChainSideEvent
@@ -137,7 +138,7 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
        engine:         engine,
        eth:            eth,
        mux:            mux,
-        txCh:           make(chan core.TxPreEvent, txChanSize),
+        txsCh:          make(chan core.NewTxsEvent, txChanSize),
        chainHeadCh:    make(chan core.ChainHeadEvent, chainHeadChanSize),
        chainSideCh:    make(chan core.ChainSideEvent, chainSideChanSize),
        chainDb:        eth.ChainDb(),
@@ -149,8 +150,8 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
        agents:         make(map[Agent]struct{}),
        unconfirmed:    newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
    }
-    // Subscribe TxPreEvent for tx pool
-    worker.txSub = eth.TxPool().SubscribeTxPreEvent(worker.txCh)
+    // Subscribe NewTxsEvent for tx pool
+    worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
    // Subscribe events for blockchain
    worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
    worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
@@ -241,7 +242,7 @@ func (self *worker) unregister(agent Agent) {
}

func (self *worker) update() {
-    defer self.txSub.Unsubscribe()
+    defer self.txsSub.Unsubscribe()
    defer self.chainHeadSub.Unsubscribe()
    defer self.chainSideSub.Unsubscribe()
@@ -258,15 +259,21 @@ func (self *worker) update() {
            self.possibleUncles[ev.Block.Hash()] = ev.Block
            self.uncleMu.Unlock()

-        // Handle TxPreEvent
-        case ev := <-self.txCh:
-            // Apply transaction to the pending state if we're not mining
+        // Handle NewTxsEvent
+        case ev := <-self.txsCh:
+            // Apply transactions to the pending state if we're not mining.
+            //
+            // Note all transactions received may not be continuous with transactions
+            // already included in the current mining block. These transactions will
+            // be automatically eliminated.
            if atomic.LoadInt32(&self.mining) == 0 {
                self.currentMu.Lock()
-                acc, _ := types.Sender(self.current.signer, ev.Tx)
-                txs := map[common.Address]types.Transactions{acc: {ev.Tx}}
+                txs := make(map[common.Address]types.Transactions)
+                for _, tx := range ev.Txs {
+                    acc, _ := types.Sender(self.current.signer, tx)
+                    txs[acc] = append(txs[acc], tx)
+                }
                txset := types.NewTransactionsByPriceAndNonce(self.current.signer, txs)
                self.current.commitTransactions(self.mux, txset, self.chain, self.coinbase)
                self.updateSnapshot()
                self.currentMu.Unlock()
@@ -278,7 +285,7 @@ func (self *worker) update() {
            }

        // System stopped
-        case <-self.txSub.Err():
+        case <-self.txsSub.Err():
            return
        case <-self.chainHeadSub.Err():
            return
@@ -522,14 +529,16 @@ func (self *worker) updateSnapshot() {
}

func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) {
-    gp := new(core.GasPool).AddGas(env.header.GasLimit)
+    if env.gasPool == nil {
+        env.gasPool = new(core.GasPool).AddGas(env.header.GasLimit)
+    }

    var coalescedLogs []*types.Log

    for {
        // If we don't have enough gas for any further transactions then we're done
-        if gp.Gas() < params.TxGas {
-            log.Trace("Not enough gas for further transactions", "gp", gp)
+        if env.gasPool.Gas() < params.TxGas {
+            log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
            break
        }
        // Retrieve the next transaction and abort if all done
@@ -553,7 +562,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
        // Start executing the transaction
        env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount)

-        err, logs := env.commitTransaction(tx, bc, coinbase, gp)
+        err, logs := env.commitTransaction(tx, bc, coinbase, env.gasPool)
        switch err {
        case core.ErrGasLimitReached:
            // Pop the current out-of-gas transaction without shifting in the next from the account
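
Note: since transaction batches can now arrive repeatedly for the same pending block, the worker keeps one gas pool on its Work object, created lazily and shared across successive commitTransactions calls, rather than resetting it on every event. A rough, self-contained sketch of that lazily initialised, reusable pool, assuming go-ethereum's core.GasPool and params.TxGas; the Work-like struct is an illustrative stand-in.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/params"
)

type workEnv struct {
    gasLimit uint64
    gasPool  *core.GasPool
}

// commit consumes gas from a pool that persists across calls, so repeated
// batches keep packing into the same block-wide gas budget.
func (env *workEnv) commit(txGas uint64) bool {
    if env.gasPool == nil {
        env.gasPool = new(core.GasPool).AddGas(env.gasLimit)
    }
    if env.gasPool.Gas() < params.TxGas {
        return false // not enough gas left for any further transaction
    }
    if err := env.gasPool.SubGas(txGas); err != nil {
        return false
    }
    return true
}

func main() {
    env := &workEnv{gasLimit: 100000}
    for i := 0; env.commit(params.TxGas); i++ {
        fmt.Println("committed tx", i, "gas left", env.gasPool.Gas())
    }
}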